rtt update

This commit is contained in:
2025-01-18 13:25:25 +08:00
parent c6a7554b51
commit d6009a0773
726 changed files with 103376 additions and 6270 deletions

View File

@@ -0,0 +1,49 @@
menuconfig RT_USING_PCI
bool "Using Peripheral Component Interconnect Express (PCIe/PCI)"
depends on RT_USING_DM
depends on RT_USING_PIC
select RT_USING_ADT
select RT_USING_ADT_BITMAP
default n
config RT_PCI_MSI
bool "PCI MSI/MSI-X"
depends on RT_USING_PCI
default y
config RT_PCI_ENDPOINT
bool "PCI Endpoint"
depends on RT_USING_PCI
select RT_USING_ADT_REF
default n
config RT_PCI_SYS_64BIT
bool "PCI System 64bit"
depends on RT_USING_PCI
depends on ARCH_CPU_64BIT
default y
config RT_PCI_CACHE_LINE_SIZE
int "PCI Cache line size"
depends on RT_USING_PCI
default 8 if ARCH_CPU_64BIT
default 4
config RT_PCI_LOCKLESS
bool "PCI Lock less in options"
depends on RT_USING_PCI
default n
if RT_USING_PCI
comment "PCI Device Drivers"
config RT_PCI_ECAM
bool "PCIe ECAM"
depends on RT_USING_PCI
default y
help
PCI Express Enhanced Configuration Access Mechanism (ECAM)
rsource "host/Kconfig"
endif

View File

@@ -0,0 +1,28 @@
from building import *
objs = []
if not GetDepend(['RT_USING_PCI']):
Return('objs')
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../include']
src = ['access.c', 'host-bridge.c', 'irq.c', 'pci.c', 'pme.c', 'probe.c']
if GetDepend(['RT_USING_OFW']):
src += ['ofw.c']
if GetDepend(['RT_PCI_ECAM']):
src += ['ecam.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')

View File

@@ -0,0 +1,159 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <drivers/pci.h>
struct rt_spinlock rt_pci_lock = { 0 };
#ifdef RT_PCI_LOCKLESS
#define pci_lock_config(l) do { (void)(l); } while (0)
#define pci_unlock_config(l) do { (void)(l); } while (0)
#else
#define pci_lock_config(l) l = rt_spin_lock_irqsave(&rt_pci_lock)
#define pci_unlock_config(l) rt_spin_unlock_irqrestore(&rt_pci_lock, l)
#endif
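/*
 * The macros below expand into rt_pci_bus_read_config_u8/u16/u32() and
 * rt_pci_bus_write_config_u8/u16/u32(): each takes the global config lock,
 * forwards to the bus ops, and a failed read returns all-ones, the PCI
 * convention for an absent device.
 */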
#define PCI_OPS_READ(name, type) \
rt_err_t rt_pci_bus_read_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type *value) \
{ \
rt_err_t err; \
rt_ubase_t level; \
rt_uint32_t data = 0; \
pci_lock_config(level); \
err = bus->ops->read(bus, devfn, reg, sizeof(type), &data); \
*value = err ? (type)(~0) : (type)data; \
pci_unlock_config(level); \
return err; \
}
#define PCI_OPS_WRITE(name, type) \
rt_err_t rt_pci_bus_write_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type value) \
{ \
rt_err_t err; \
rt_ubase_t level; \
pci_lock_config(level); \
err = bus->ops->write(bus, devfn, reg, sizeof(type), value); \
pci_unlock_config(level); \
return err; \
}
#define PCI_OPS(name, type) \
PCI_OPS_READ(name, type) \
PCI_OPS_WRITE(name, type)
PCI_OPS(u8, rt_uint8_t)
PCI_OPS(u16, rt_uint16_t)
PCI_OPS(u32, rt_uint32_t)
#undef PCI_OPS_WRITE
#undef PCI_OPS_READ
#undef PCI_OPS
rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg)))
{
if (width == 1)
{
*value = HWREG8(base);
}
else if (width == 2)
{
*value = HWREG16(base);
}
else
{
*value = HWREG32(base);
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg)))
{
if (width == 1)
{
HWREG8(base) = value;
}
else if (width == 2)
{
HWREG16(base) = value;
}
else
{
HWREG32(base) = value;
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
{
*value = HWREG32(base);
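/*
 * The value is read as an aligned 32-bit word; narrow reads then extract
 * the addressed byte/halfword, e.g. a 1-byte read at reg 0x0e returns
 * bits [23:16] of the dword at 0x0c.
 */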
if (width <= 2)
{
*value = (*value >> (8 * (reg & 3))) & ((1 << (width * 8)) - 1);
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
{
if (width == 4)
{
HWREG32(base) = value;
}
else
{
rt_uint32_t mask, tmp;
mask = ~(((1 << (width * 8)) - 1) << ((reg & 0x3) * 8));
tmp = HWREG32(base) & mask;
tmp |= value << ((reg & 0x3) * 8);
HWREG32(base) = tmp;
}
return RT_EOK;
}
return -RT_ERROR;
}

View File

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "pci.ecam"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "ecam.h"
struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
const struct pci_ecam_ops *ops)
{
struct pci_ecam_config_window *conf_win = rt_calloc(1, sizeof(*conf_win));
if (!conf_win)
{
return RT_NULL;
}
conf_win->bus_range = host_bridge->bus_range;
conf_win->bus_shift = ops->bus_shift;
conf_win->ops = ops;
host_bridge->ops = (const struct rt_pci_ops *)&ops->pci_ops;
return conf_win;
}
void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
{
struct pci_ecam_config_window *conf_win = bus->sysdata;
const struct pci_ecam_ops *eops = conf_win->ops;
void *win = conf_win->win, *map;
rt_uint32_t busn = bus->number, bus_shift = eops->bus_shift, devfn_shift = bus_shift - 8;
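/* The ECAM window covers bus_range[0]..bus_range[1], so index it by the bus number relative to the first bus. */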
busn -= conf_win->bus_range[0];
if (bus_shift)
{
rt_uint32_t bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift;
rt_uint32_t devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift;
where &= PCIE_ECAM_REG_MASK;
map = win + (bus_offset | devfn_offset | where);
}
else
{
map = win + PCIE_ECAM_OFFSET(busn, devfn, where);
}
return map;
}
const struct pci_ecam_ops pci_generic_ecam_ops =
{
.pci_ops =
{
.map = pci_ecam_map,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};

View File

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#ifndef __RT_PCI_ECAM_H__
#define __RT_PCI_ECAM_H__
#include <drivers/pci.h>
#include <drivers/ofw.h>
#include <drivers/ofw_io.h>
#include <drivers/platform.h>
/*
* Memory address shift values for the byte-level address that
* can be used when accessing the PCI Express Configuration Space.
*/
/*
* Enhanced Configuration Access Mechanism (ECAM)
*
* See PCI Express Base Specification, Revision 5.0, Version 1.0,
* Section 7.2.2, Table 7-1, p. 677.
*/
#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */
#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */
#define PCIE_ECAM_BUS_MASK 0xff
#define PCIE_ECAM_DEVFN_MASK 0xff
#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */
#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK)
#define PCIE_ECAM_OFFSET(bus, devfn, where) \
(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEVFN(devfn) | PCIE_ECAM_REG(where))
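/*
 * Example (illustrative values): bus 0x01, devfn 0x10 (device 2, function 0),
 * register 0x10 gives (0x01 << 20) | (0x10 << 12) | 0x10 = 0x110010.
 */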
struct pci_ecam_ops
{
rt_uint32_t bus_shift;
const struct rt_pci_ops pci_ops;
};
struct pci_ecam_config_window
{
rt_uint32_t *bus_range;
rt_uint32_t bus_shift;
void *win;
void *priv;
const struct pci_ecam_ops *ops;
};
/* Default ECAM ops */
extern const struct pci_ecam_ops pci_generic_ecam_ops;
void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where);
struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
const struct pci_ecam_ops *ops);
rt_err_t pci_host_common_probe(struct rt_platform_device *pdev);
rt_err_t pci_host_common_remove(struct rt_platform_device *pdev);
#endif /* __RT_PCI_ECAM_H__ */

View File

@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_PCI_ENDPOINT']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = ['endpoint.c', 'mem.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,504 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#include <drivers/pci_endpoint.h>
#define DBG_TAG "pci.ep"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static rt_list_t _ep_nodes = RT_LIST_OBJECT_INIT(_ep_nodes);
static struct rt_spinlock _ep_lock = { 0 };
rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr)
{
rt_err_t err;
if (ep && ep->ops && hdr && func_no < ep->max_functions)
{
if (ep->ops->write_header)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->write_header(ep, func_no, hdr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_err_t err = RT_EOK;
if (ep && ep->ops && func_no < ep->max_functions && bar &&
bar_idx < PCI_STD_NUM_BARS)
{
struct rt_pci_bus_resource *bus_bar = &bar->bus;
if (bar_idx == (PCI_STD_NUM_BARS - 1) &&
(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] can't not 64bit", ep->name, bar_idx);
}
if (rt_upper_32_bits(bus_bar->size) &&
!(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] size is no support 64bit", ep->name, bar_idx);
}
if ((bus_bar->flags & PCIM_BAR_SPACE_IO) &&
(bus_bar->flags & PCIM_BAR_IO_MASK))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] io flags is invalid", ep->name, bar_idx);
}
if (!err)
{
if (ep->ops->set_bar)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->set_bar(ep, func_no, bar, bar_idx);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && bar &&
bar_idx < PCI_STD_NUM_BARS)
{
if (ep->ops->clear_bar)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->clear_bar(ep, func_no, bar, bar_idx);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && size)
{
if (ep->ops->map_addr)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->map_addr(ep, func_no, addr, pci_addr, size);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
if (ep->ops->unmap_addr)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->unmap_addr(ep, func_no, addr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
if (ep->ops->set_msi)
{
err = -RT_EINVAL;
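/* The MSI capability stores the vector count as a power of two (Multiple Message Capable), so pass log2 of the requested count. */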
for (int log2 = 0; log2 < 5; ++log2)
{
if (irq_nr <= (1 << log2))
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->set_msi(ep, func_no, log2);
rt_mutex_release(&ep->lock);
}
}
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
{
if (ep->ops->get_msi)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->get_msi(ep, func_no, out_irq_nr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && irq_nr < 2048 &&
bar_idx < PCI_STD_NUM_BARS)
{
if (ep->ops->set_msix)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->set_msix(ep, func_no, irq_nr, bar_idx, offset);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
{
if (ep->ops->get_msix)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->get_msix(ep, func_no, out_irq_nr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
if (ep->ops->raise_irq)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->raise_irq(ep, func_no, type, irq);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep)
{
rt_err_t err;
if (ep && ep->ops)
{
if (ep->ops->start)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->start(ep);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep)
{
rt_err_t err;
if (ep && ep->ops)
{
if (ep->ops->stop)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->stop(ep);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep)
{
rt_ubase_t level;
if (!ep || !ep->ops)
{
return -RT_EINVAL;
}
rt_list_init(&ep->list);
rt_ref_init(&ep->ref);
rt_list_init(&ep->epf_nodes);
rt_mutex_init(&ep->lock, ep->name, RT_IPC_FLAG_PRIO);
level = rt_spin_lock_irqsave(&_ep_lock);
rt_list_insert_before(&_ep_nodes, &ep->list);
rt_spin_unlock_irqrestore(&_ep_lock, level);
return RT_EOK;
}
rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep)
{
rt_ubase_t level;
rt_err_t err = RT_EOK;
if (!ep)
{
return -RT_EINVAL;
}
level = rt_spin_lock_irqsave(&_ep_lock);
if (rt_ref_read(&ep->ref) > 1)
{
err = -RT_EBUSY;
}
else
{
rt_list_remove(&ep->list);
rt_mutex_detach(&ep->lock);
}
rt_spin_unlock_irqrestore(&_ep_lock, level);
return err;
}
rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
{
rt_err_t err = RT_EOK;
if (!ep || !epf || !epf->name)
{
return -RT_EINVAL;
}
if (epf->func_no > ep->max_functions - 1)
{
LOG_E("%s function No(%d) > %s max function No(%d - 1)",
epf->name, epf->func_no, ep->name, ep->max_functions);
return -RT_EINVAL;
}
epf->ep = ep;
rt_list_init(&epf->list);
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
if (!rt_bitmap_test_bit(ep->functions_map, epf->func_no))
{
rt_bitmap_set_bit(ep->functions_map, epf->func_no);
rt_list_insert_before(&ep->epf_nodes, &epf->list);
}
else
{
err = -RT_EINVAL;
LOG_E("%s function No(%d) is repeating", epf->name, epf->func_no);
}
rt_mutex_release(&ep->lock);
return err;
}
rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
{
if (!ep || !epf)
{
return -RT_EINVAL;
}
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_bitmap_clear_bit(ep->functions_map, epf->func_no);
rt_list_remove(&epf->list);
rt_mutex_release(&ep->lock);
return RT_EOK;
}
struct rt_pci_ep *rt_pci_ep_get(const char *name)
{
rt_ubase_t level;
struct rt_pci_ep *ep = RT_NULL, *ep_tmp;
level = rt_spin_lock_irqsave(&_ep_lock);
rt_list_for_each_entry(ep_tmp, &_ep_nodes, list)
{
if (!name || !rt_strcmp(ep_tmp->name, name))
{
ep = ep_tmp;
rt_ref_get(&ep->ref);
break;
}
}
rt_spin_unlock_irqrestore(&_ep_lock, level);
return ep;
}
static void pci_ep_release(struct rt_ref *ref)
{
struct rt_pci_ep *ep = rt_container_of(ref, struct rt_pci_ep, ref);
rt_pci_ep_unregister(ep);
}
void rt_pci_ep_put(struct rt_pci_ep *ep)
{
if (ep)
{
rt_ref_put(&ep->ref, &pci_ep_release);
}
}

View File

@@ -0,0 +1,205 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#include <drivers/pci_endpoint.h>
#define DBG_TAG "pci.ep.mem"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
rt_err_t rt_pci_ep_mem_array_init(struct rt_pci_ep *ep,
struct rt_pci_ep_mem *mems, rt_size_t mems_nr)
{
rt_size_t idx;
rt_err_t err = RT_EOK;
if (!ep || !mems)
{
return -RT_EINVAL;
}
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
ep->mems_nr = mems_nr;
ep->mems = rt_calloc(mems_nr, sizeof(*ep->mems));
if (!ep->mems)
{
rt_mutex_release(&ep->lock);
return -RT_ENOMEM;
}
for (idx = 0; idx < mems_nr; ++idx)
{
struct rt_pci_ep_mem *mem = &ep->mems[idx];
mem->cpu_addr = mems[idx].cpu_addr;
mem->size = mems[idx].size;
mem->page_size = mems[idx].page_size;
mem->bits = mems[idx].size / mems[idx].page_size;
mem->map = rt_calloc(RT_BITMAP_LEN(mem->bits), sizeof(*mem->map));
if (!mem->map)
{
err = -RT_ENOMEM;
goto _out_lock;
}
}
_out_lock:
if (err)
{
while (idx --> 0)
{
rt_free(ep->mems[idx].map);
}
rt_free(ep->mems);
ep->mems_nr = 0;
ep->mems = RT_NULL;
}
rt_mutex_release(&ep->lock);
return err;
}
rt_err_t rt_pci_ep_mem_init(struct rt_pci_ep *ep,
rt_ubase_t cpu_addr, rt_size_t size, rt_size_t page_size)
{
struct rt_pci_ep_mem mem;
if (!ep)
{
return -RT_EINVAL;
}
mem.cpu_addr = cpu_addr;
mem.size = size;
mem.page_size = page_size;
return rt_pci_ep_mem_array_init(ep, &mem, 1);
}
static rt_ubase_t bitmap_region_alloc(struct rt_pci_ep_mem *mem, rt_size_t size)
{
rt_size_t bit, next_bit, end_bit, max_bits;
size /= mem->page_size;
max_bits = mem->bits - size;
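/* First-fit search: find 'size' consecutive clear pages, mark them used and return the matching CPU address. */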
rt_bitmap_for_each_clear_bit(mem->map, bit, max_bits)
{
end_bit = bit + size;
for (next_bit = bit + 1; next_bit < end_bit; ++next_bit)
{
if (rt_bitmap_test_bit(mem->map, next_bit))
{
bit = next_bit;
goto _next;
}
}
if (next_bit == end_bit)
{
while (next_bit --> bit)
{
rt_bitmap_set_bit(mem->map, next_bit);
}
return mem->cpu_addr + bit * mem->page_size;
}
_next:;
}
return ~0ULL;
}
static void bitmap_region_free(struct rt_pci_ep_mem *mem,
rt_ubase_t cpu_addr, rt_size_t size)
{
rt_size_t bit = (cpu_addr - mem->cpu_addr) / mem->page_size, end_bit;
size /= mem->page_size;
end_bit = bit + size;
for (; bit < end_bit; ++bit)
{
rt_bitmap_clear_bit(mem->map, bit);
}
}
void *rt_pci_ep_mem_alloc(struct rt_pci_ep *ep,
rt_ubase_t *out_cpu_addr, rt_size_t size)
{
void *vaddr = RT_NULL;
if (!ep || !out_cpu_addr)
{
return vaddr;
}
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
for (rt_size_t idx = 0; idx < ep->mems_nr; ++idx)
{
rt_ubase_t cpu_addr;
struct rt_pci_ep_mem *mem = &ep->mems[idx];
cpu_addr = bitmap_region_alloc(mem, size);
if (cpu_addr != ~0ULL)
{
vaddr = rt_ioremap((void *)cpu_addr, size);
if (!vaddr)
{
bitmap_region_free(mem, cpu_addr, size);
/* Try next memory */
continue;
}
*out_cpu_addr = cpu_addr;
break;
}
}
rt_mutex_release(&ep->lock);
return vaddr;
}
void rt_pci_ep_mem_free(struct rt_pci_ep *ep,
void *vaddr, rt_ubase_t cpu_addr, rt_size_t size)
{
if (!ep || !vaddr || !size)
{
return;
}
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
for (rt_size_t idx = 0; idx < ep->mems_nr; ++idx)
{
struct rt_pci_ep_mem *mem = &ep->mems[idx];
if (mem->cpu_addr <= cpu_addr &&
mem->cpu_addr + mem->size >= cpu_addr + size)
{
rt_iounmap(vaddr);
bitmap_region_free(mem, cpu_addr, size);
break;
}
}
rt_mutex_release(&ep->lock);
}

View File

@@ -0,0 +1,129 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include <drivers/pm.h>
#include <drivers/pci.h>
#ifdef RT_USING_PM
struct host_bridge_pm_status
{
rt_uint8_t mode;
rt_bool_t enable;
};
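/* Map each system sleep mode to the PCI power state requested from devices on suspend. */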
static const enum rt_pci_power system_pci_pm_mode[] =
{
[PM_SLEEP_MODE_NONE] = RT_PCI_D0,
[PM_SLEEP_MODE_IDLE] = RT_PCI_D3HOT,
[PM_SLEEP_MODE_LIGHT] = RT_PCI_D1,
[PM_SLEEP_MODE_DEEP] = RT_PCI_D1,
[PM_SLEEP_MODE_STANDBY] = RT_PCI_D2,
[PM_SLEEP_MODE_SHUTDOWN] = RT_PCI_D3COLD,
};
static rt_bool_t pci_device_pm_ops(struct rt_pci_device *pdev, void *data)
{
struct host_bridge_pm_status *status = data;
rt_pci_enable_wake(pdev, system_pci_pm_mode[status->mode], status->enable);
/* To find all devices, always return false */
return RT_FALSE;
}
static rt_err_t host_bridge_pm_suspend(const struct rt_device *device, rt_uint8_t mode)
{
struct host_bridge_pm_status status;
struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
status.mode = mode;
status.enable = RT_FALSE;
rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
return RT_EOK;
}
static void host_bridge_pm_resume(const struct rt_device *device, rt_uint8_t mode)
{
struct host_bridge_pm_status status;
struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
status.mode = mode;
status.enable = RT_TRUE;
rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
}
static const struct rt_device_pm_ops host_bridge_pm_ops =
{
.suspend = host_bridge_pm_suspend,
.resume = host_bridge_pm_resume,
};
#endif /* RT_USING_PM */
static void host_bridge_free(struct rt_pci_device *pdev)
{
#ifdef RT_USING_PM
rt_pm_device_unregister(&pdev->parent);
#endif
}
static rt_err_t host_bridge_probe(struct rt_pci_device *pdev)
{
rt_err_t err = RT_EOK;
rt_pci_set_master(pdev);
#ifdef RT_USING_PM
rt_pm_device_register(&pdev->parent, &host_bridge_pm_ops);
#endif
return err;
}
static rt_err_t host_bridge_remove(struct rt_pci_device *pdev)
{
host_bridge_free(pdev);
rt_pci_clear_master(pdev);
return RT_EOK;
}
static rt_err_t host_bridge_shutdown(struct rt_pci_device *pdev)
{
host_bridge_free(pdev);
return RT_EOK;
}
static const struct rt_pci_device_id host_bridge_pci_ids[] =
{
/* PCI host bridges */
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT, 0x0008) },
/* Any PCI-Express port */
{ RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_NORMAL, ~0) },
/* PCI-to-PCI bridge */
{ RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
/* Any Root Complex Event Collector */
{ RT_PCI_DEVICE_CLASS(((PCIS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ /* sentinel */ }
};
static struct rt_pci_driver host_bridge_driver =
{
.name = "host-bridge",
.ids = host_bridge_pci_ids,
.probe = host_bridge_probe,
.remove = host_bridge_remove,
.shutdown = host_bridge_shutdown,
};
RT_PCI_DRIVER_EXPORT(host_bridge_driver);

View File

@@ -0,0 +1,12 @@
config RT_PCI_HOST_COMMON
bool "Common PCI host controller"
depends on RT_PCI_ECAM
default y
config RT_PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on RT_PCI_ECAM
select RT_PCI_HOST_COMMON
default y
rsource "dw/Kconfig"

View File

@@ -0,0 +1,25 @@
from building import *
objs = []
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../../include']
src = []
if GetDepend(['RT_PCI_HOST_COMMON']):
src += ['pci-host-common.c']
if GetDepend(['RT_PCI_HOST_GENERIC']):
src += ['pci-host-generic.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')

View File

@@ -0,0 +1,13 @@
config RT_PCI_DW
bool "DesignWare-based PCIe"
depends on RT_MFD_SYSCON
depends on RT_USING_DMA
default n
config RT_PCI_DW_HOST
bool
depends on RT_PCI_DW
config RT_PCI_DW_EP
bool
depends on RT_PCI_DW

View File

@@ -0,0 +1,21 @@
from building import *
group = []
if not GetDepend(['RT_PCI_DW']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../../include']
src = ['pcie-dw.c', 'pcie-dw_platfrom.c']
if GetDepend(['RT_PCI_DW_HOST']):
src += ['pcie-dw_host.c']
if GetDepend(['RT_PCI_DW_EP']):
src += ['pcie-dw_ep.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,645 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#define DBG_TAG "pcie.dw"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "pcie-dw.h"
static rt_uint8_t __dw_pcie_find_next_cap(struct dw_pcie *pci,
rt_uint8_t cap_ptr, rt_uint8_t cap)
{
rt_uint16_t reg;
rt_uint8_t cap_id, next_cap_ptr;
if (!cap_ptr)
{
return 0;
}
reg = dw_pcie_readw_dbi(pci, cap_ptr);
cap_id = (reg & 0x00ff);
if (cap_id > PCIY_MAX)
{
return 0;
}
if (cap_id == cap)
{
return cap_ptr;
}
next_cap_ptr = (reg & 0xff00) >> 8;
return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap)
{
rt_uint16_t reg;
rt_uint8_t next_cap_ptr;
reg = dw_pcie_readw_dbi(pci, PCIR_CAP_PTR);
next_cap_ptr = (reg & 0x00ff);
return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
static rt_uint16_t dw_pcie_find_next_ext_capability(struct dw_pcie *pci,
rt_uint16_t start, rt_uint8_t cap)
{
rt_uint32_t header;
int ttl, pos = PCI_REGMAX + 1;
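/* Extended capabilities start right after the legacy 256-byte config space (offset 0x100). */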
/* minimum 8 bytes per capability */
ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;
if (start)
{
pos = start;
}
header = dw_pcie_readl_dbi(pci, pos);
/*
* If we have no capabilities, this is indicated by cap ID,
* cap version and next pointer all being 0.
*/
if (header == 0)
{
return 0;
}
while (ttl-- > 0)
{
if (PCI_EXTCAP_ID(header) == cap && pos != start)
{
return pos;
}
pos = PCI_EXTCAP_NEXTPTR(header);
if (pos < PCI_REGMAX + 1)
{
break;
}
header = dw_pcie_readl_dbi(pci, pos);
}
return 0;
}
rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap)
{
return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val)
{
/* Check aligned */
if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
{
*out_val = 0;
return -RT_EINVAL;
}
if (size == 4)
{
*out_val = HWREG32(addr);
}
else if (size == 2)
{
*out_val = HWREG16(addr);
}
else if (size == 1)
{
*out_val = HWREG8(addr);
}
else
{
*out_val = 0;
return -RT_EINVAL;
}
return RT_EOK;
}
rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val)
{
/* Check aligned */
if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
{
return -RT_EINVAL;
}
if (size == 4)
{
HWREG32(addr) = val;
}
else if (size == 2)
{
HWREG16(addr) = val;
}
else if (size == 1)
{
HWREG8(addr) = val;
}
else
{
return -RT_EINVAL;
}
return RT_EOK;
}
rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size)
{
rt_err_t err;
rt_uint32_t val = 0;
if (pci->ops->read_dbi)
{
return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
}
if ((err = dw_pcie_read(pci->dbi_base + reg, size, &val)))
{
LOG_E("Read DBI address error = %s", rt_strerror(err));
}
return val;
}
void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
{
rt_err_t err;
if (pci->ops->write_dbi)
{
pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
if ((err = dw_pcie_write(pci->dbi_base + reg, size, val)))
{
LOG_E("Write DBI address error = %s", rt_strerror(err));
}
}
void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
{
rt_err_t err;
if (pci->ops && pci->ops->write_dbi2)
{
pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
return;
}
if ((err = dw_pcie_write(pci->dbi_base2 + reg, size, val)))
{
LOG_E("Write DBI2 address error = %s", rt_strerror(err));
}
}
rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg)
{
rt_err_t err;
rt_uint32_t val = 0;
if (pci->ops->read_dbi)
{
return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
}
if ((err = dw_pcie_read(pci->atu_base + reg, 4, &val)))
{
LOG_E("Read ATU address error = %s", rt_strerror(err));
}
return val;
}
void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
{
rt_err_t err;
if (pci->ops->write_dbi)
{
pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
return;
}
if ((err = dw_pcie_write(pci->atu_base + reg, 4, val)))
{
LOG_E("Write ATU address error = %s", rt_strerror(err));
}
}
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, rt_uint8_t func_no,
int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
rt_uint64_t limit_addr = cpu_addr + size - 1;
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
rt_lower_32_bits(cpu_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
rt_upper_32_bits(cpu_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
rt_lower_32_bits(limit_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
rt_upper_32_bits(limit_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
rt_lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
rt_upper_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
type | PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
{
if (dw_pcie_readl_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
{
return;
}
rt_thread_mdelay(LINK_WAIT_IATU);
}
LOG_E("Outbound iATU is not being enabled");
}
static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
if (pci->ops->cpu_addr_fixup)
{
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
}
if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
{
dw_pcie_prog_outbound_atu_unroll(pci, func_no,
index, type, cpu_addr, pci_addr, size);
return;
}
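/* Legacy (viewport) mode: select the region in PCIE_ATU_VIEWPORT, then program it through the shared ATU registers. */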
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, rt_lower_32_bits(cpu_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, rt_upper_32_bits(cpu_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, rt_lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
{
if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
{
return;
}
rt_thread_mdelay(LINK_WAIT_IATU);
}
LOG_E("Outbound iATU is not being enabled");
}
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
__dw_pcie_prog_outbound_atu(pci, 0, index, type, cpu_addr, pci_addr, size);
}
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
__dw_pcie_prog_outbound_atu(pci, func_no, index, type, cpu_addr, pci_addr, size);
}
static rt_err_t dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci,
rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
enum dw_pcie_aspace_type aspace_type)
{
int type;
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
rt_lower_32_bits(cpu_addr));
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
rt_upper_32_bits(cpu_addr));
switch (aspace_type)
{
case DW_PCIE_ASPACE_MEM:
type = PCIE_ATU_TYPE_MEM;
break;
case DW_PCIE_ASPACE_IO:
type = PCIE_ATU_TYPE_IO;
break;
default:
return -RT_EINVAL;
}
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
type | PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE |
PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
{
if (dw_pcie_readl_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
{
return RT_EOK;
}
rt_thread_mdelay(LINK_WAIT_IATU);
}
LOG_E("Inbound iATU is not being enabled");
return -RT_EBUSY;
}
rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci,
rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
enum dw_pcie_aspace_type aspace_type)
{
int type;
if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
{
return dw_pcie_prog_inbound_atu_unroll(pci, func_no,
index, bar, cpu_addr, aspace_type);
}
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | index);
dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(cpu_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(cpu_addr));
switch (aspace_type)
{
case DW_PCIE_ASPACE_MEM:
type = PCIE_ATU_TYPE_MEM;
break;
case DW_PCIE_ASPACE_IO:
type = PCIE_ATU_TYPE_IO;
break;
default:
return -RT_EINVAL;
}
dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
{
if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
{
return RT_EOK;
}
rt_thread_mdelay(LINK_WAIT_IATU);
}
LOG_E("Inbound iATU is not being enabled");
return -RT_EBUSY;
}
void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type)
{
rt_uint32_t region;
switch (type)
{
case DW_PCIE_REGION_INBOUND:
region = PCIE_ATU_REGION_INBOUND;
break;
case DW_PCIE_REGION_OUTBOUND:
region = PCIE_ATU_REGION_OUTBOUND;
break;
default:
return;
}
if (pci->iatu_unroll_enabled)
{
if (region == PCIE_ATU_REGION_INBOUND)
{
dw_pcie_writel_ib_unroll(pci, index,
PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
}
else
{
dw_pcie_writel_ob_unroll(pci, index,
PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
}
}
else
{
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
}
}
rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci)
{
/* Check if the link is up or not */
for (int retries = 0; retries < LINK_WAIT_MAX_RETRIES; ++retries)
{
if (dw_pcie_link_up(pci))
{
LOG_I("%s: Link up", rt_dm_dev_get_name(pci->dev));
return RT_EOK;
}
rt_hw_us_delay((LINK_WAIT_USLEEP_MIN + LINK_WAIT_USLEEP_MAX) >> 1);
}
LOG_I("PHY link never came up");
return -RT_ETIMEOUT;
}
rt_bool_t dw_pcie_link_up(struct dw_pcie *pci)
{
rt_uint32_t val;
if (pci->ops->link_up)
{
return pci->ops->link_up(pci);
}
val = HWREG32(pci->dbi_base + PCIE_PORT_DEBUG1);
return (val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING));
}
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
rt_uint32_t val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
val |= PORT_MLTI_UPCFG_SUPPORT;
dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, rt_uint32_t link_gen)
{
rt_uint32_t cap, ctrl2, link_speed;
rt_uint8_t offset = dw_pcie_find_capability(pci, PCIY_EXPRESS);
cap = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CAP);
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CTL2);
ctrl2 &= ~PCIEM_LNKCTL2_TLS;
switch (link_gen)
{
case 1: link_speed = PCIEM_LNKCTL2_TLS_2_5GT; break;
case 2: link_speed = PCIEM_LNKCTL2_TLS_5_0GT; break;
case 3: link_speed = PCIEM_LNKCTL2_TLS_8_0GT; break;
case 4: link_speed = PCIEM_LNKCTL2_TLS_16_0GT; break;
default:
/* Use hardware capability */
link_speed = RT_FIELD_GET(PCIEM_LINK_CAP_MAX_SPEED, cap);
ctrl2 &= ~PCIEM_LNKCTL2_HASD;
break;
}
dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CTL2, ctrl2 | link_speed);
cap &= ~((rt_uint32_t)PCIEM_LINK_CAP_MAX_SPEED);
dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CAP, cap | link_speed);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
rt_uint32_t val;
struct rt_device *dev = pci->dev;
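/* Cores since 4.80a use the unrolled iATU register layout; with an unknown version it is probed at runtime. */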
if (pci->version >= 0x480a || (!pci->version && dw_pcie_iatu_unroll_enabled(pci)))
{
pci->iatu_unroll_enabled |= DWC_IATU_UNROLL_EN;
if (!pci->atu_base)
{
pci->atu_base = rt_dm_dev_iomap_by_name(dev, "atu");
}
if (!pci->atu_base)
{
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
}
LOG_D("iATU unroll is %sabled", pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN ? "en" : "dis");
if (pci->link_gen > 0)
{
dw_pcie_link_set_max_speed(pci, pci->link_gen);
}
/* Configure Gen1 N_FTS */
if (pci->fts_number[0])
{
val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
val |= PORT_AFR_N_FTS(pci->fts_number[0]);
val |= PORT_AFR_CC_N_FTS(pci->fts_number[0]);
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
/* Configure Gen2+ N_FTS */
if (pci->fts_number[1])
{
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_N_FTS_MASK;
val |= pci->fts_number[1];
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_FAST_LINK_MODE;
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
if (rt_dm_dev_prop_read_bool(dev, "snps,enable-cdm-check"))
{
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START;
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
}
rt_dm_dev_prop_read_u32(dev, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes)
{
LOG_D("Using h/w default number of lanes");
return;
}
/* Set the number of lanes */
val &= ~PORT_LINK_FAST_LINK_MODE;
val &= ~PORT_LINK_MODE_MASK;
switch (pci->num_lanes)
{
case 1: val |= PORT_LINK_MODE_1_LANES; break;
case 2: val |= PORT_LINK_MODE_2_LANES; break;
case 4: val |= PORT_LINK_MODE_4_LANES; break;
case 8: val |= PORT_LINK_MODE_8_LANES; break;
default:
LOG_E("Invail num-lanes = %d", pci->num_lanes);
return;
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
/* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pci->num_lanes)
{
case 1: val |= PORT_LOGIC_LINK_WIDTH_1_LANES; break;
case 2: val |= PORT_LOGIC_LINK_WIDTH_2_LANES; break;
case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break;
case 8: val |= PORT_LOGIC_LINK_WIDTH_8_LANES; break;
}
val |= pci->user_speed;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}

View File

@@ -0,0 +1,440 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#ifndef __PCIE_DESIGNWARE_H__
#define __PCIE_DESIGNWARE_H__
#include <rtthread.h>
#include <rtdevice.h>
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_AFR 0x70c
#define PORT_AFR_N_FTS_MASK RT_GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) RT_FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
#define PORT_AFR_CC_N_FTS_MASK RT_GENMASK(23, 16)
#define PORT_AFR_CC_N_FTS(n) RT_FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
#define PORT_AFR_ENTER_ASPM RT_BIT(30)
#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
#define PORT_AFR_L0S_ENTRANCE_LAT_MASK RT_GENMASK(26, 24)
#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
#define PORT_AFR_L1_ENTRANCE_LAT_MASK RT_GENMASK(29, 27)
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_LPBK_ENABLE RT_BIT(2)
#define PORT_LINK_DLL_LINK_EN RT_BIT(5)
#define PORT_LINK_FAST_LINK_MODE RT_BIT(7)
#define PORT_LINK_MODE_MASK RT_GENMASK(21, 16)
#define PORT_LINK_MODE(n) RT_FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
#define PCIE_PORT_DEBUG1 0x72c
#define PCIE_PORT_DEBUG1_LINK_UP RT_BIT(4)
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING RT_BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80c
#define PORT_LOGIC_N_FTS_MASK RT_GENMASK(7, 0)
#define PORT_LOGIC_SPEED_CHANGE RT_BIT(17)
#define PORT_LOGIC_LINK_WIDTH_MASK RT_GENMASK(12, 8)
#define PORT_LOGIC_LINK_WIDTH(n) RT_FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
#define PCIE_MSI_INTR0_ENABLE 0x828
#define PCIE_MSI_INTR0_MASK 0x82c
#define PCIE_MSI_INTR0_STATUS 0x830
#define PCIE_PORT_MULTI_LANE_CTRL 0x8c0
#define PORT_MLTI_UPCFG_SUPPORT RT_BIT(7)
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND RT_BIT(31)
#define PCIE_ATU_REGION_OUTBOUND 0
#define PCIE_ATU_CR1 0x904
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE RT_BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE RT_BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN RT_BIT(19)
#define PCIE_ATU_LOWER_BASE 0x90c
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
#define PCIE_ATU_LOWER_TARGET 0x918
#define PCIE_ATU_BUS(x) RT_FIELD_PREP(RT_GENMASK(31, 24), x)
#define PCIE_ATU_DEV(x) RT_FIELD_PREP(RT_GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) RT_FIELD_PREP(RT_GENMASK(18, 16), x)
#define PCIE_ATU_UPPER_TARGET 0x91c
#define PCIE_MISC_CONTROL_1_OFF 0x8bc
#define PCIE_DBI_RO_WR_EN RT_BIT(0)
#define PCIE_MSIX_DOORBELL 0x948
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xb20
#define PCIE_PL_CHK_REG_CHK_REG_START RT_BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS RT_BIT(1)
#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR RT_BIT(16)
#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR RT_BIT(17)
#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE RT_BIT(18)
#define PCIE_PL_CHK_REG_ERR_ADDR 0xb28
/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
#define PCIE_ATU_UNR_REGION_CTRL1 0x00
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
* default; the driver core automatically derives atu_base from dbi_base using
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((region) << 9)
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) (((region) << 9) | RT_BIT(8))
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
#define MSI_REG_CTRL_BLOCK_SIZE 12
#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
#define DWC_IATU_UNROLL_EN RT_BIT(0)
#define DWC_IATU_IOCFG_SHARED RT_BIT(1)
struct dw_pcie_host_ops;
struct dw_pcie_ep_ops;
struct dw_pcie_ops;
enum dw_pcie_region_type
{
DW_PCIE_REGION_UNKNOWN,
DW_PCIE_REGION_INBOUND,
DW_PCIE_REGION_OUTBOUND,
};
enum dw_pcie_device_mode
{
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
DW_PCIE_LEG_EP_TYPE,
DW_PCIE_RC_TYPE,
};
enum dw_pcie_aspace_type
{
DW_PCIE_ASPACE_UNKNOWN,
DW_PCIE_ASPACE_MEM,
DW_PCIE_ASPACE_IO,
};
struct dw_pcie_port
{
void *cfg0_base;
rt_uint64_t cfg0_addr;
rt_uint64_t cfg0_size;
rt_ubase_t io_addr;
rt_ubase_t io_bus_addr;
rt_size_t io_size;
const struct dw_pcie_host_ops *ops;
int sys_irq;
int msi_irq;
struct rt_pic *irq_pic;
struct rt_pic *msi_pic;
void *msi_data;
rt_ubase_t msi_data_phy;
rt_uint32_t irq_count;
rt_uint32_t irq_mask[MAX_MSI_CTRLS];
struct rt_pci_host_bridge *bridge;
const struct rt_pci_ops *bridge_child_ops;
struct rt_spinlock lock;
RT_BITMAP_DECLARE(msi_map, MAX_MSI_IRQS);
};
struct dw_pcie_host_ops
{
rt_err_t (*host_init)(struct dw_pcie_port *port);
rt_err_t (*msi_host_init)(struct dw_pcie_port *port);
void (*set_irq_count)(struct dw_pcie_port *port);
};
struct dw_pcie_ep_func
{
rt_list_t list;
rt_uint8_t func_no;
rt_uint8_t msi_cap; /* MSI capability offset */
rt_uint8_t msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ep
{
struct rt_pci_ep *epc;
struct rt_pci_ep_bar *epc_bar[PCI_STD_NUM_BARS];
rt_list_t func_nodes;
const struct dw_pcie_ep_ops *ops;
rt_uint64_t aspace;
rt_uint64_t aspace_size;
rt_size_t page_size;
rt_uint8_t bar_to_atu[PCI_STD_NUM_BARS];
rt_ubase_t *outbound_addr;
rt_bitmap_t *ib_window_map;
rt_bitmap_t *ob_window_map;
rt_uint32_t num_ib_windows;
rt_uint32_t num_ob_windows;
void *msi_mem;
rt_ubase_t msi_mem_phy;
};
struct dw_pcie_ep_ops
{
rt_err_t (*ep_init)(struct dw_pcie_ep *ep);
rt_err_t (*raise_irq)(struct dw_pcie_ep *ep, rt_uint8_t func_no, enum rt_pci_ep_irq type, unsigned irq);
rt_off_t (*func_select)(struct dw_pcie_ep *ep, rt_uint8_t func_no);
};
struct dw_pcie
{
struct rt_device *dev;
void *dbi_base;
void *dbi_base2;
void *atu_base;
rt_uint32_t version;
rt_uint32_t num_viewport;
rt_uint32_t num_lanes;
rt_uint32_t link_gen;
rt_uint32_t user_speed;
rt_uint8_t iatu_unroll_enabled; /* Internal Address Translation Unit */
rt_uint8_t fts_number[2]; /* Fast Training Sequences */
struct dw_pcie_port port;
struct dw_pcie_ep endpoint;
const struct dw_pcie_ops *ops;
void *priv;
};
struct dw_pcie_ops
{
rt_uint64_t (*cpu_addr_fixup)(struct dw_pcie *pcie, rt_uint64_t cpu_addr);
rt_uint32_t (*read_dbi)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
void (*write_dbi2)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
rt_bool_t (*link_up)(struct dw_pcie *pcie);
rt_err_t (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
};
#define to_dw_pcie_from_port(ptr) rt_container_of((ptr), struct dw_pcie, port)
#define to_dw_pcie_from_endpoint(ptr) rt_container_of((ptr), struct dw_pcie, endpoint)
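/*
 * When RT_PCI_DW_HOST / RT_PCI_DW_EP are not enabled, the declarations below
 * turn into inline stubs that simply return the value given to HOST_RET()/EP_RET(),
 * so common code links without the host/endpoint objects.
 */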
#ifdef RT_PCI_DW_HOST
#undef RT_PCI_DW_HOST
#define RT_PCI_DW_HOST 1
#define HOST_API
#define HOST_RET(...) ;
#else
#define HOST_API rt_inline
#define HOST_RET(...) { return __VA_ARGS__; }
#endif
#ifdef RT_PCI_DW_EP
#undef RT_PCI_DW_EP
#define RT_PCI_DW_EP 1
#define EP_API
#define EP_RET(...) ;
#else
#define EP_API rt_inline
#define EP_RET(...) { return __VA_ARGS__; }
#endif
rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap);
rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap);
rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val);
rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val);
rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg);
void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val);
rt_bool_t dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size);
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no, int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size);
rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci, rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr, enum dw_pcie_aspace_type aspace_type);
void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
rt_inline void dw_pcie_writel_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
{
dw_pcie_write_dbi(pci, reg, 0x4, val);
}
rt_inline rt_uint32_t dw_pcie_readl_dbi(struct dw_pcie *pci, rt_uint32_t reg)
{
return dw_pcie_read_dbi(pci, reg, 0x4);
}
rt_inline void dw_pcie_writew_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint16_t val)
{
dw_pcie_write_dbi(pci, reg, 0x2, val);
}
rt_inline rt_uint16_t dw_pcie_readw_dbi(struct dw_pcie *pci, rt_uint32_t reg)
{
return dw_pcie_read_dbi(pci, reg, 0x2);
}
rt_inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint8_t val)
{
dw_pcie_write_dbi(pci, reg, 0x1, val);
}
rt_inline rt_uint8_t dw_pcie_readb_dbi(struct dw_pcie *pci, rt_uint32_t reg)
{
return dw_pcie_read_dbi(pci, reg, 0x1);
}
rt_inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
{
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
rt_inline void dw_pcie_dbi_ro_writable_enable(struct dw_pcie *pci, rt_bool_t enable)
{
const rt_uint32_t reg = PCIE_MISC_CONTROL_1_OFF;
if (enable)
{
dw_pcie_writel_dbi(pci, reg, dw_pcie_readl_dbi(pci, reg) | PCIE_DBI_RO_WR_EN);
}
else
{
dw_pcie_writel_dbi(pci, reg, dw_pcie_readl_dbi(pci, reg) & ~PCIE_DBI_RO_WR_EN);
}
}
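/* On unrolled-iATU cores the legacy viewport register is not implemented and reads back as all-ones. */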
rt_inline rt_uint8_t dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
return dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) == 0xffffffff ? 1 : 0;
}
rt_inline rt_uint32_t dw_pcie_readl_ob_unroll(struct dw_pcie *pci,
rt_uint32_t index, rt_uint32_t reg)
{
return dw_pcie_readl_atu(pci, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}
rt_inline void dw_pcie_writel_ob_unroll(struct dw_pcie *pci,
rt_uint32_t index, rt_uint32_t reg, rt_uint32_t val)
{
dw_pcie_writel_atu(pci, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg, val);
}
rt_inline rt_uint32_t dw_pcie_readl_ib_unroll(struct dw_pcie *pci,
rt_uint32_t index, rt_uint32_t reg)
{
return dw_pcie_readl_atu(pci, PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg);
}
rt_inline void dw_pcie_writel_ib_unroll(struct dw_pcie *pci,
rt_uint32_t index, rt_uint32_t reg, rt_uint32_t val)
{
dw_pcie_writel_atu(pci, PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg, val);
}
HOST_API rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port) HOST_RET(-RT_ENOSYS)
HOST_API void dw_pcie_msi_init(struct dw_pcie_port *port) HOST_RET()
HOST_API void dw_pcie_free_msi(struct dw_pcie_port *port) HOST_RET()
HOST_API void dw_pcie_setup_rc(struct dw_pcie_port *port) HOST_RET()
HOST_API rt_err_t dw_pcie_host_init(struct dw_pcie_port *port) HOST_RET(-RT_ENOSYS)
HOST_API void dw_pcie_host_deinit(struct dw_pcie_port *port) HOST_RET()
HOST_API void dw_pcie_host_free(struct dw_pcie_port *port) HOST_RET()
HOST_API void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg) HOST_RET(RT_NULL)
EP_API rt_err_t dw_pcie_ep_init(struct dw_pcie_ep *ep) EP_RET(-RT_ENOSYS)
EP_API rt_err_t dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) EP_RET(-RT_ENOSYS)
EP_API void dw_pcie_ep_exit(struct dw_pcie_ep *ep) EP_RET()
EP_API rt_err_t dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET(-RT_ENOSYS)
EP_API rt_err_t dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
EP_API rt_err_t dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
EP_API rt_err_t dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
EP_API void dw_pcie_ep_reset_bar(struct dw_pcie *pci, int bar_idx) EP_RET()
EP_API rt_err_t dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
int bar_idx, rt_ubase_t cpu_addr, enum dw_pcie_aspace_type aspace_type) EP_RET(-RT_ENOSYS)
EP_API rt_err_t dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
rt_ubase_t phys_addr, rt_uint64_t pci_addr, rt_size_t size) EP_RET(-RT_ENOSYS)
EP_API struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET(RT_NULL)
#endif /* __PCIE_DESIGNWARE_H__ */

View File

@@ -0,0 +1,863 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#define DBG_TAG "pcie.dw-ep"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "pcie-dw.h"
struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no)
{
struct dw_pcie_ep_func *ep_func;
rt_list_for_each_entry(ep_func, &ep->func_nodes, list)
{
if (ep_func->func_no == func_no)
{
return ep_func;
}
}
return RT_NULL;
}
static rt_uint8_t dw_pcie_ep_func_select(struct dw_pcie_ep *ep, rt_uint8_t func_no)
{
rt_uint8_t func_offset = 0;
if (ep->ops->func_select)
{
func_offset = ep->ops->func_select(ep, func_no);
}
return func_offset;
}
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, rt_uint8_t func_no,
int bar_idx, int flags)
{
rt_uint32_t reg;
rt_uint8_t func_offset = 0;
struct dw_pcie_ep *ep = &pci->endpoint;
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = func_offset + PCIR_BAR(bar_idx);
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
if (flags & PCIM_BAR_MEM_TYPE_64)
{
dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
dw_pcie_writel_dbi(pci, reg + 4, 0x0);
}
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
}
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, int bar_idx)
{
rt_uint8_t func_no, funcs = pci->endpoint.epc->max_functions;
for (func_no = 0; func_no < funcs; ++func_no)
{
__dw_pcie_ep_reset_bar(pci, func_no, bar_idx, 0);
}
}
static rt_uint8_t __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, rt_uint8_t func_no,
rt_uint8_t cap_ptr, rt_uint8_t cap)
{
rt_uint16_t reg;
rt_uint8_t func_offset = 0, cap_id, next_cap_ptr;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
if (!cap_ptr)
{
return 0;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
cap_id = (reg & 0x00ff);
if (cap_id > PCIY_MAX)
{
return 0;
}
if (cap_id == cap)
{
return cap_ptr;
}
next_cap_ptr = (reg & 0xff00) >> 8;
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
static rt_uint8_t dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, rt_uint8_t func_no,
rt_uint8_t cap)
{
rt_uint16_t reg;
rt_uint8_t func_offset = 0, next_cap_ptr;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = dw_pcie_readw_dbi(pci, func_offset + PCIR_CAP_PTR);
next_cap_ptr = reg & 0x00ff;
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
rt_err_t dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
int bar_idx, rt_ubase_t cpu_addr, enum dw_pcie_aspace_type aspace_type)
{
rt_err_t err;
rt_uint32_t free_win;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
free_win = rt_bitmap_next_clear_bit(ep->ib_window_map, 0, ep->num_ib_windows);
if (free_win >= ep->num_ib_windows)
{
LOG_E("No free inbound window");
return -RT_EEMPTY;
}
err = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar_idx, cpu_addr, aspace_type);
if (err)
{
LOG_E("Failed to program IB window error = %s", rt_strerror(err));
return err;
}
ep->bar_to_atu[bar_idx] = free_win;
rt_bitmap_set_bit(ep->ib_window_map, free_win);
return RT_EOK;
}
rt_err_t dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
rt_ubase_t phys_addr, rt_uint64_t pci_addr, rt_size_t size)
{
rt_uint32_t free_win;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
free_win = rt_bitmap_next_clear_bit(ep->ob_window_map, 0, ep->num_ob_windows);
if (free_win >= ep->num_ob_windows)
{
LOG_E("No free outbound window");
return -RT_EEMPTY;
}
dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
phys_addr, pci_addr, size);
ep->outbound_addr[free_win] = phys_addr;
rt_bitmap_set_bit(ep->ob_window_map, free_win);
return RT_EOK;
}
static rt_err_t dw_pcie_ep_write_header(struct rt_pci_ep *epc, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr)
{
rt_uint8_t func_offset = 0;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
dw_pcie_writew_dbi(pci, func_offset + PCIR_VENDOR, hdr->vendor);
dw_pcie_writew_dbi(pci, func_offset + PCIR_DEVICE, hdr->device);
dw_pcie_writeb_dbi(pci, func_offset + PCIR_REVID, hdr->revision);
dw_pcie_writeb_dbi(pci, func_offset + PCIR_PROGIF, hdr->progif);
dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBCLASS, hdr->subclass | hdr->class_code << 8);
dw_pcie_writeb_dbi(pci, func_offset + PCIR_CACHELNSZ, hdr->cache_line_size);
dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBVEND_0, hdr->subsystem_vendor);
dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBDEV_0, hdr->subsystem_device);
dw_pcie_writeb_dbi(pci, func_offset + PCIR_INTPIN, hdr->intx);
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
return RT_EOK;
}
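/*
 * A minimal sketch (not part of this driver) of how an endpoint function
 * driver might program its config header through the EPC ops above. The
 * numeric IDs are placeholders, and a real function driver would more likely
 * go through an rt_pci_ep_* wrapper whose exact name is not shown here:
 *
 *   struct rt_pci_ep_header hdr =
 *   {
 *       .vendor = 0x1234,       // hypothetical vendor ID
 *       .device = 0x5678,       // hypothetical device ID
 *       .class_code = 0xff,     // unassigned class, for illustration only
 *       .subclass = 0x00,
 *   };
 *   epc->ops->write_header(epc, 0, &hdr);   // program function 0
 */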
static rt_err_t dw_pcie_ep_clear_bar(struct rt_pci_ep *epc, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_uint32_t atu_index;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
atu_index = ep->bar_to_atu[bar_idx];
__dw_pcie_ep_reset_bar(pci, func_no, bar_idx, ep->epc_bar[bar_idx]->bus.flags);
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
rt_bitmap_clear_bit(ep->ib_window_map, atu_index);
ep->epc_bar[bar_idx] = RT_NULL;
return RT_EOK;
}
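/*
 * Programming a BAR on DWC hardware touches two register spaces: the shadow
 * (DBI2) write in dw_pcie_ep_set_bar() below sets the BAR mask (size - 1),
 * which controls how many address bits the BAR decodes, while the normal DBI
 * write sets the BAR flags. A 64-bit memory BAR consumes the next BAR
 * register as well, hence the extra pair of writes guarded by
 * PCIM_BAR_MEM_TYPE_64.
 */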
static rt_err_t dw_pcie_ep_set_bar(struct rt_pci_ep *epc, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_err_t err;
rt_uint32_t reg;
rt_uint8_t func_offset = 0;
rt_size_t size = bar->bus.size;
rt_ubase_t flags = bar->bus.flags;
enum dw_pcie_aspace_type aspace_type;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = PCIR_BAR(bar_idx) + func_offset;
if (!(flags & PCIM_BAR_SPACE))
{
aspace_type = DW_PCIE_ASPACE_MEM;
}
else
{
aspace_type = DW_PCIE_ASPACE_IO;
}
err = dw_pcie_ep_inbound_atu(ep, func_no, bar_idx, bar->bus.base, aspace_type);
if (err)
{
return err;
}
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
dw_pcie_writel_dbi2(pci, reg, rt_lower_32_bits(size - 1));
dw_pcie_writel_dbi(pci, reg, flags);
if (flags & PCIM_BAR_MEM_TYPE_64)
{
dw_pcie_writel_dbi2(pci, reg + 4, rt_upper_32_bits(size - 1));
dw_pcie_writel_dbi(pci, reg + 4, 0);
}
ep->epc_bar[bar_idx] = bar;
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
return RT_EOK;
}
static rt_err_t dw_pcie_find_index(struct dw_pcie_ep *ep,
rt_ubase_t addr, rt_uint32_t *atu_index)
{
for (rt_uint32_t index = 0; index < ep->num_ob_windows; ++index)
{
if (ep->outbound_addr[index] != addr)
{
continue;
}
*atu_index = index;
return RT_EOK;
}
return -RT_EINVAL;
}
static rt_err_t dw_pcie_ep_unmap_addr(struct rt_pci_ep *epc, rt_uint8_t func_no,
rt_ubase_t addr)
{
rt_err_t err;
rt_uint32_t atu_index;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
if ((err = dw_pcie_find_index(ep, addr, &atu_index)))
{
return err;
}
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
rt_bitmap_clear_bit(ep->ob_window_map, atu_index);
return RT_EOK;
}
static rt_err_t dw_pcie_ep_map_addr(struct rt_pci_ep *epc, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
{
rt_err_t err;
struct dw_pcie_ep *ep = epc->priv;
err = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
if (err)
{
LOG_E("Failed to enable address error = %s", rt_strerror(err));
return err;
}
return RT_EOK;
}
static rt_err_t dw_pcie_ep_set_msi(struct rt_pci_ep *epc, rt_uint8_t func_no,
unsigned irq_nr)
{
rt_uint32_t val, reg;
rt_uint8_t func_offset = 0;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
{
return -RT_EINVAL;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCIM_MSICTRL_MMC_MASK;
val |= (irq_nr << 1) & PCIM_MSICTRL_MMC_MASK;
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
dw_pcie_writew_dbi(pci, reg, val);
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
return RT_EOK;
}
static rt_err_t dw_pcie_ep_get_msi(struct rt_pci_ep *epc, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_uint32_t val, reg;
rt_uint8_t func_offset = 0;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
{
return -RT_EINVAL;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCIM_MSICTRL_MSI_ENABLE))
{
return -RT_EINVAL;
}
*out_irq_nr = (val & PCIM_MSICTRL_MME_MASK) >> 4;
return RT_EOK;
}
static rt_err_t dw_pcie_ep_set_msix(struct rt_pci_ep *epc, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset)
{
rt_uint32_t val, reg;
rt_uint8_t func_offset = 0;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
{
return -RT_EINVAL;
}
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = ep_func->msix_cap + func_offset + PCIR_MSIX_CTRL;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCIM_MSIXCTRL_TABLE_SIZE;
val |= irq_nr;
dw_pcie_writew_dbi(pci, reg, val);
reg = ep_func->msix_cap + func_offset + PCIR_MSIX_TABLE;
val = offset | bar_idx;
dw_pcie_writel_dbi(pci, reg, val);
reg = ep_func->msix_cap + func_offset + PCIR_MSIX_PBA;
val = (offset + (irq_nr * PCIM_MSIX_ENTRY_SIZE)) | bar_idx;
dw_pcie_writel_dbi(pci, reg, val);
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
return RT_EOK;
}
static rt_err_t dw_pcie_ep_get_msix(struct rt_pci_ep *epc, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_uint32_t val, reg;
rt_uint8_t func_offset = 0;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
{
return -RT_EINVAL;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = ep_func->msix_cap + func_offset + PCIR_MSIX_CTRL;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCIM_MSIXCTRL_MSIX_ENABLE))
{
return -RT_EINVAL;
}
*out_irq_nr = val & PCIM_MSIXCTRL_TABLE_SIZE;
return RT_EOK;
}
static rt_err_t dw_pcie_ep_raise_irq(struct rt_pci_ep *epc, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq)
{
struct dw_pcie_ep *ep = epc->priv;
if (!ep->ops->raise_irq)
{
return -RT_ENOSYS;
}
return ep->ops->raise_irq(ep, func_no, type, irq);
}
static rt_err_t dw_pcie_ep_stop(struct rt_pci_ep *epc)
{
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
if (pci->ops->stop_link)
{
pci->ops->stop_link(pci);
}
return RT_EOK;
}
static rt_err_t dw_pcie_ep_start(struct rt_pci_ep *epc)
{
struct dw_pcie_ep *ep = epc->priv;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
if (pci->ops->start_link)
{
return pci->ops->start_link(pci);
}
return RT_EOK;
}
static const struct rt_pci_ep_ops dw_pcie_ep_ops =
{
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
.get_msi = dw_pcie_ep_get_msi,
.set_msix = dw_pcie_ep_set_msix,
.get_msix = dw_pcie_ep_get_msix,
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
};
rt_err_t dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no)
{
LOG_E("EP cannot trigger legacy IRQs");
return -RT_EINVAL;
}
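/*
 * MSI from the endpoint side: the host has already written the MSI address
 * and data into this function's MSI capability, so the code below reads them
 * back from DBI space, maps a page-sized outbound window at the page-aligned
 * target address, performs a single 32-bit write of the data to trigger the
 * interrupt, and then tears the window down again.
 */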
rt_err_t dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no,
unsigned irq)
{
rt_err_t err;
rt_off_t aligned_offset;
rt_uint8_t func_offset = 0;
rt_uint64_t msg_addr;
rt_uint16_t msg_ctrl, msg_data;
rt_uint32_t msg_addr_lower, msg_addr_upper, reg;
struct rt_pci_ep *epc = ep->epc;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
{
return -RT_EINVAL;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
msg_ctrl = dw_pcie_readw_dbi(pci, reg);
reg = ep_func->msi_cap + func_offset + PCIR_MSI_ADDR;
msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (!!(msg_ctrl & PCIM_MSICTRL_64BIT))
{
reg = ep_func->msi_cap + func_offset + PCIR_MSI_ADDR_HIGH;
msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
reg = ep_func->msi_cap + func_offset + PCIR_MSI_DATA_64BIT;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
else
{
msg_addr_upper = 0;
reg = ep_func->msi_cap + func_offset + PCIR_MSI_DATA;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
aligned_offset = msg_addr_lower & (ep->page_size - 1);
msg_addr = ((rt_uint64_t)msg_addr_upper << 32) | (msg_addr_lower & ~aligned_offset);
if ((err = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phy, msg_addr, ep->page_size)))
{
return err;
}
HWREG32(ep->msi_mem + aligned_offset) = msg_data | (irq - 1);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phy);
return RT_EOK;
}
rt_err_t dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, rt_uint8_t func_no,
unsigned irq)
{
rt_uint32_t msg_data;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
{
return -RT_EINVAL;
}
msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | (irq - 1);
dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
return RT_EOK;
}
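/*
 * MSI-X from the endpoint side: the vector table lives in one of the
 * endpoint's own BARs, so the code below locates the table via the MSI-X
 * capability (BIR + offset), reads the address/data/vector-control of the
 * requested entry, honours the per-vector mask bit, and then writes the data
 * through a temporarily mapped outbound window, just like the MSI path.
 */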
rt_err_t dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no,
unsigned irq)
{
rt_err_t err;
int bar_idx;
rt_uint64_t msg_addr;
rt_uint32_t tbl_offset;
rt_off_t aligned_offset;
rt_uint8_t func_offset = 0;
rt_uint32_t reg, msg_data, vec_ctrl;
struct rt_pci_ep *epc = ep->epc;
struct rt_pci_ep_msix_tbl *msix_tbl;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
{
return -RT_EINVAL;
}
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = ep_func->msix_cap + func_offset + PCIR_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bar_idx = (tbl_offset & PCIM_MSIX_BIR_MASK);
tbl_offset &= PCIM_MSIX_TABLE_OFFSET;
msix_tbl = (void *)ep->epc_bar[bar_idx]->cpu_addr + tbl_offset;
msg_addr = msix_tbl[(irq - 1)].msg_addr;
msg_data = msix_tbl[(irq - 1)].msg_data;
vec_ctrl = msix_tbl[(irq - 1)].vector_ctrl;
if (vec_ctrl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK)
{
return -RT_EINVAL;
}
aligned_offset = msg_addr & (ep->page_size - 1);
if ((err = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phy, msg_addr, ep->page_size)))
{
return err;
}
HWREG32(ep->msi_mem + aligned_offset) = msg_data;
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phy);
return RT_EOK;
}
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct rt_pci_ep *epc = ep->epc;
if (ep->msi_mem)
{
rt_pci_ep_mem_free(epc, ep->msi_mem, ep->msi_mem_phy, ep->page_size);
}
if (!rt_list_isempty(&ep->func_nodes))
{
struct dw_pcie_ep_func *ep_func, *ep_func_next;
rt_list_for_each_entry_safe(ep_func, ep_func_next, &ep->func_nodes, list)
{
rt_list_remove(&ep_func->list);
rt_free(ep_func);
}
}
if (ep->ib_window_map)
{
rt_free(ep->ib_window_map);
}
if (ep->ob_window_map)
{
rt_free(ep->ob_window_map);
}
if (ep->outbound_addr)
{
rt_free(ep->outbound_addr);
}
if (epc)
{
rt_free(epc);
}
}
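/*
 * Extended capabilities live in the PCIe config space above the first 256
 * bytes (hence the start at PCI_REGMAX + 1); each 32-bit header carries the
 * capability ID and the offset of the next header, which the loop below
 * follows until it finds a match or hits a zero next pointer.
 */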
static rt_uint32_t dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
{
rt_uint32_t header;
int pos = (PCI_REGMAX + 1);
while (pos)
{
header = dw_pcie_readl_dbi(pci, pos);
if (PCI_EXTCAP_ID(header) == cap)
{
return pos;
}
if (!(pos = PCI_EXTCAP_NEXTPTR(header)))
{
break;
}
}
return 0;
}
rt_err_t dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
rt_off_t offset;
rt_size_t bar_nr;
rt_uint32_t reg;
rt_uint8_t hdr_type;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
hdr_type = dw_pcie_readb_dbi(pci, PCIR_HDRTYPE) & PCIM_HDRTYPE;
if (hdr_type != PCIM_HDRTYPE_NORMAL)
{
LOG_E("PCIe controller is not set to EP mode hdr_type = %x", hdr_type);
return -RT_EIO;
}
offset = dw_pcie_ep_find_ext_capability(pci, PCIZ_RESIZE_BAR);
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
if (offset)
{
reg = dw_pcie_readl_dbi(pci, offset + PCIM_REBAR_CTRL);
bar_nr = (reg & PCIM_REBAR_CTRL_NBAR_MASK) >> PCIM_REBAR_CTRL_NBAR_SHIFT;
for (int i = 0; i < bar_nr; ++i, offset += PCIM_REBAR_CTRL)
{
dw_pcie_writel_dbi(pci, offset + PCIM_REBAR_CAP, 0x0);
}
}
dw_pcie_setup(pci);
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
return RT_EOK;
}
rt_err_t dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
rt_err_t err;
struct rt_pci_ep *epc = RT_NULL;
struct dw_pcie_ep_func *ep_func;
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
struct rt_device *dev = pci->dev;
rt_list_init(&ep->func_nodes);
if (!pci->dbi_base || !pci->dbi_base2)
{
LOG_E("dbi_base/dbi_base2 is not populated");
return -RT_EINVAL;
}
if ((err = rt_dm_dev_prop_read_u32(dev, "num-ib-windows", &ep->num_ib_windows)))
{
LOG_E("Unable to read 'num-ib-windows' property");
return err;
}
if (ep->num_ib_windows > MAX_IATU_IN)
{
LOG_E("Invalid 'num-ib-windows'");
return -RT_EINVAL;
}
if ((err = rt_dm_dev_prop_read_u32(dev, "num-ob-windows", &ep->num_ob_windows)))
{
LOG_E("Unable to read 'num-ob-windows' property");
return err;
}
if (ep->num_ob_windows > MAX_IATU_OUT)
{
LOG_E("Invalid 'num-ob-windows'");
return -RT_EINVAL;
}
ep->ib_window_map = rt_calloc(RT_BITMAP_LEN(ep->num_ib_windows), sizeof(rt_bitmap_t));
if (!ep->ib_window_map)
{
return -RT_ENOMEM;
}
ep->ob_window_map = rt_calloc(RT_BITMAP_LEN(ep->num_ob_windows), sizeof(rt_bitmap_t));
if (!ep->ob_window_map)
{
err = -RT_ENOMEM;
goto _fail;
}
ep->outbound_addr = rt_calloc(ep->num_ob_windows, sizeof(rt_ubase_t));
if (!ep->outbound_addr)
{
err = -RT_ENOMEM;
goto _fail;
}
if (pci->link_gen < 1)
{
pci->link_gen = -1;
rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
}
epc = rt_calloc(1, sizeof(*epc));
if (!epc)
{
err = -RT_ENOMEM;
goto _fail;
}
epc->name = rt_dm_dev_get_name(dev);
epc->rc_dev = dev;
epc->ops = &dw_pcie_ep_ops;
epc->priv = ep;
if ((err = rt_pci_ep_register(epc)))
{
goto _fail;
}
ep->epc = epc;
if (rt_dm_dev_prop_read_u8(dev, "max-functions", &epc->max_functions))
{
epc->max_functions = 1;
}
for (rt_uint8_t func_no = 0; func_no < epc->max_functions; ++func_no)
{
ep_func = rt_calloc(1, sizeof(*ep_func));
if (!ep_func)
{
err = -RT_ENOMEM;
goto _fail;
}
ep_func->func_no = func_no;
ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, PCIY_MSI);
ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, PCIY_MSIX);
rt_list_init(&ep_func->list);
rt_list_insert_after(&ep->func_nodes, &ep_func->list);
}
if (ep->ops->ep_init)
{
ep->ops->ep_init(ep);
}
if ((err = rt_pci_ep_mem_init(epc, ep->aspace, ep->aspace_size, ep->page_size)))
{
goto _fail;
}
ep->msi_mem = rt_pci_ep_mem_alloc(epc, &ep->msi_mem_phy, ep->page_size);
if (!ep->msi_mem)
{
LOG_E("Failed to reserve memory for MSI/MSI-X");
err = -RT_ENOMEM;
goto _fail;
}
if ((err = dw_pcie_ep_init_complete(ep)))
{
goto _fail;
}
return RT_EOK;
_fail:
dw_pcie_ep_exit(ep);
return err;
}

View File

@@ -0,0 +1,644 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#define DBG_TAG "pcie.dw-host"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "pcie-dw.h"
static void dw_pcie_irq_ack(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
rt_uint32_t res, bit, ctrl;
struct dw_pcie_port *port = pirq->pic->priv_data;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, RT_BIT(bit));
}
static void dw_pcie_irq_mask(struct rt_pic_irq *pirq)
{
rt_ubase_t level;
int hwirq = pirq->hwirq;
rt_uint32_t res, bit, ctrl;
struct dw_pcie_port *port = pirq->pic->priv_data;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
rt_pci_msi_mask_irq(pirq);
level = rt_spin_lock_irqsave(&port->lock);
ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
port->irq_mask[ctrl] |= RT_BIT(bit);
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);
rt_spin_unlock_irqrestore(&port->lock, level);
}
static void dw_pcie_irq_unmask(struct rt_pic_irq *pirq)
{
rt_ubase_t level;
int hwirq = pirq->hwirq;
rt_uint32_t res, bit, ctrl;
struct dw_pcie_port *port = pirq->pic->priv_data;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
rt_pci_msi_unmask_irq(pirq);
level = rt_spin_lock_irqsave(&port->lock);
ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
port->irq_mask[ctrl] &= ~RT_BIT(bit);
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);
rt_spin_unlock_irqrestore(&port->lock, level);
}
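/*
 * The MSI "doorbell" is just a DMA-coherent rt_uint64_t allocated by the host
 * driver: its physical address is advertised to devices as the MSI target
 * (see dw_pcie_msi_init()), and the written data value is interpreted as the
 * hardware IRQ number, which is why the composed message below is simply
 * address = msi_data_phy, data = hwirq.
 */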
static void dw_pcie_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
rt_uint64_t msi_target;
struct dw_pcie_port *port = pirq->pic->priv_data;
msi_target = (rt_uint64_t)port->msi_data_phy;
msg->address_lo = rt_lower_32_bits(msi_target);
msg->address_hi = rt_upper_32_bits(msi_target);
msg->data = pirq->hwirq;
}
static int dw_pcie_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
rt_ubase_t level;
int irq, hwirq;
struct rt_pic_irq *pirq;
struct dw_pcie_port *port = pic->priv_data;
level = rt_spin_lock_irqsave(&port->lock);
hwirq = rt_bitmap_next_clear_bit(port->msi_map, 0, port->irq_count);
if (hwirq >= port->irq_count)
{
irq = -RT_EEMPTY;
goto _out_lock;
}
pirq = rt_pic_find_irq(pic, hwirq);
irq = rt_pic_config_irq(pic, hwirq, hwirq);
pirq->mode = RT_IRQ_MODE_EDGE_RISING;
rt_bitmap_set_bit(port->msi_map, hwirq);
_out_lock:
rt_spin_unlock_irqrestore(&port->lock, level);
return irq;
}
static void dw_pcie_irq_free_msi(struct rt_pic *pic, int irq)
{
rt_ubase_t level;
struct rt_pic_irq *pirq;
struct dw_pcie_port *port = pic->priv_data;
pirq = rt_pic_find_pirq(pic, irq);
if (!pirq)
{
return;
}
level = rt_spin_lock_irqsave(&port->lock);
rt_bitmap_clear_bit(port->msi_map, pirq->hwirq);
rt_spin_unlock_irqrestore(&port->lock, level);
}
const static struct rt_pic_ops dw_pci_msi_ops =
{
.name = "DWPCI-MSI",
.irq_ack = dw_pcie_irq_ack,
.irq_mask = dw_pcie_irq_mask,
.irq_unmask = dw_pcie_irq_unmask,
.irq_compose_msi_msg = dw_pcie_compose_msi_msg,
.irq_alloc_msi = dw_pcie_irq_alloc_msi,
.irq_free_msi = dw_pcie_irq_free_msi,
.flags = RT_PIC_F_IRQ_ROUTING,
};
/* MSI int handler */
rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port)
{
rt_err_t err;
int i, pos;
rt_bitmap_t status;
rt_uint32_t num_ctrls;
struct rt_pic_irq *pirq;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
struct rt_pic *msi_pic = port->msi_pic;
err = -RT_EEMPTY;
num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);
for (i = 0; i < num_ctrls; ++i)
{
status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE));
if (!status)
{
continue;
}
err = RT_EOK;
rt_bitmap_for_each_set_bit(&status, pos, MAX_MSI_IRQS_PER_CTRL)
{
pirq = rt_pic_find_irq(msi_pic, pos + i * MAX_MSI_IRQS_PER_CTRL);
dw_pcie_irq_ack(pirq);
rt_pic_handle_isr(pirq);
}
}
return err;
}
static void dw_pcie_msi_isr(int irqno, void *param)
{
struct dw_pcie_port *port = param;
dw_handle_msi_irq(port);
}
void dw_pcie_free_msi(struct dw_pcie_port *port)
{
if (port->msi_irq >= 0)
{
rt_hw_interrupt_mask(port->msi_irq);
rt_pic_detach_irq(port->msi_irq, port);
}
if (port->msi_data)
{
struct dw_pcie *pci = to_dw_pcie_from_port(port);
rt_dma_free_coherent(pci->dev, sizeof(rt_uint64_t), port->msi_data,
port->msi_data_phy);
}
}
void dw_pcie_msi_init(struct dw_pcie_port *port)
{
#ifdef RT_PCI_MSI
struct dw_pcie *pci = to_dw_pcie_from_port(port);
rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;
/* Program the msi_data_phy */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, rt_lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, rt_upper_32_bits(msi_target));
#endif
}
static const struct rt_pci_ops dw_child_pcie_ops;
static const struct rt_pci_ops dw_pcie_ops;
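/*
 * Host bring-up below follows roughly this order: map the "config" window,
 * allocate and initialize a host bridge from the OFW node, translate the
 * bridge bus regions into cfg/IO window parameters, set up the built-in MSI
 * controller (unless the SoC provides msi_host_init), run the SoC-specific
 * host_init hook, and finally probe the bridge to enumerate devices.
 */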
rt_err_t dw_pcie_host_init(struct dw_pcie_port *port)
{
rt_err_t err;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
struct rt_device *dev = pci->dev;
struct rt_pci_host_bridge *bridge;
rt_spin_lock_init(&port->lock);
rt_dm_dev_get_address_by_name(dev, "config", &port->cfg0_addr, &port->cfg0_size);
if (port->cfg0_addr)
{
port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
if (!port->cfg0_base)
{
return -RT_EIO;
}
}
else if (!port->cfg0_base)
{
LOG_E("Missing 'config' reg space");
}
if (!(bridge = rt_pci_host_bridge_alloc(0)))
{
return -RT_ENOMEM;
}
bridge->parent.ofw_node = dev->ofw_node;
if ((err = rt_pci_host_bridge_init(bridge)))
{
goto _err_free_bridge;
}
port->bridge = bridge;
for (int i = 0; i < bridge->bus_regions_nr; ++i)
{
struct rt_pci_bus_region *region = &bridge->bus_regions[i];
switch (region->flags)
{
case PCI_BUS_REGION_F_IO:
port->io_addr = region->cpu_addr;
port->io_bus_addr = region->phy_addr;
port->io_size = region->size;
break;
case PCI_BUS_REGION_F_NONE:
port->cfg0_size = region->size;
port->cfg0_addr = region->cpu_addr;
if (!pci->dbi_base)
{
pci->dbi_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
if (!pci->dbi_base)
{
LOG_E("Error with ioremap");
return -RT_ENOMEM;
}
}
break;
default:
break;
}
}
if (!port->cfg0_base && port->cfg0_addr)
{
port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
if (!port->cfg0_base)
{
return -RT_ENOMEM;
}
}
if (rt_dm_dev_prop_read_u32(dev, "num-viewport", &pci->num_viewport))
{
pci->num_viewport = 2;
}
if (pci->link_gen < 1)
{
pci->link_gen = -1;
rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
}
/*
* If a specific SoC driver needs to change the default number of vectors,
* it needs to implement the set_irq_count callback.
*/
if (!port->ops->set_irq_count)
{
port->irq_count = MSI_DEF_NUM_VECTORS;
}
else
{
port->ops->set_irq_count(port);
if (port->irq_count > MAX_MSI_IRQS || port->irq_count == 0)
{
LOG_E("Invalid count of irq = %d", port->irq_count);
return -RT_EINVAL;
}
}
if (!port->ops->msi_host_init)
{
port->msi_pic = rt_calloc(1, sizeof(*port->msi_pic));
if (!port->msi_pic)
{
return -RT_ENOMEM;
}
port->msi_pic->priv_data = port;
port->msi_pic->ops = &dw_pci_msi_ops;
rt_pic_linear_irq(port->msi_pic, port->irq_count);
rt_pic_user_extends(port->msi_pic);
if (port->msi_irq)
{
rt_hw_interrupt_install(port->msi_irq, dw_pcie_msi_isr, port, "dwc-pci-msi");
rt_hw_interrupt_umask(port->msi_irq);
}
port->msi_data = rt_dma_alloc_coherent(pci->dev, sizeof(rt_uint64_t),
&port->msi_data_phy);
if (!port->msi_data)
{
err = -RT_ENOMEM;
goto _err_free_msi;
}
}
else
{
if ((err = port->ops->msi_host_init(port)))
{
return err;
}
}
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
if (port->ops->host_init && (err = port->ops->host_init(port)))
{
goto _err_free_msi;
}
bridge->sysdata = port;
if ((err = rt_pci_host_bridge_probe(bridge)))
{
goto _err_free_msi;
}
return RT_EOK;
_err_free_msi:
if (!port->ops->msi_host_init)
{
dw_pcie_free_msi(port);
rt_pic_cancel_irq(port->msi_pic);
rt_free(port->msi_pic);
port->msi_pic = RT_NULL;
}
_err_free_bridge:
rt_pci_host_bridge_free(bridge);
port->bridge = RT_NULL;
return err;
}
void dw_pcie_host_deinit(struct dw_pcie_port *port)
{
if (!port->ops->msi_host_init)
{
dw_pcie_free_msi(port);
}
}
void dw_pcie_host_free(struct dw_pcie_port *port)
{
if (!port->ops->msi_host_init)
{
dw_pcie_free_msi(port);
rt_pic_cancel_irq(port->msi_pic);
rt_free(port->msi_pic);
}
if (port->bridge)
{
rt_pci_host_bridge_free(port->bridge);
}
}
static void *dw_pcie_other_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
int type;
rt_uint32_t busdev;
struct dw_pcie_port *port = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
/*
* Checking whether the link is up here is a last line of defense
* against platforms that forward errors on the system bus as
* SError upon PCI configuration transactions issued when the link is down.
* This check is racy by definition and does not stop the system from
* triggering an SError if the link goes down after this check is performed.
*/
if (!dw_pcie_link_up(pci))
{
return RT_NULL;
}
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(RT_PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(RT_PCI_FUNC(devfn));
if (rt_pci_is_root_bus(bus->parent))
{
type = PCIE_ATU_TYPE_CFG0;
}
else
{
type = PCIE_ATU_TYPE_CFG1;
}
dw_pcie_prog_outbound_atu(pci, 0, type, port->cfg0_addr, busdev, port->cfg0_size);
return port->cfg0_base + reg;
}
static rt_err_t dw_pcie_other_read_conf(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
rt_err_t err;
struct dw_pcie_port *port = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
err = rt_pci_bus_read_config_uxx(bus, devfn, reg, width, value);
if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
{
dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
port->io_addr, port->io_bus_addr, port->io_size);
}
return err;
}
static rt_err_t dw_pcie_other_write_conf(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
rt_err_t err;
struct dw_pcie_port *port = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
err = rt_pci_bus_write_config_uxx(bus, devfn, reg, width, value);
if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
{
dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
port->io_addr, port->io_bus_addr, port->io_size);
}
return err;
}
static const struct rt_pci_ops dw_child_pcie_ops =
{
.map = dw_pcie_other_conf_map,
.read = dw_pcie_other_read_conf,
.write = dw_pcie_other_write_conf,
};
void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
struct dw_pcie_port *port = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
if (RT_PCI_SLOT(devfn) > 0)
{
return RT_NULL;
}
return pci->dbi_base + reg;
}
static const struct rt_pci_ops dw_pcie_ops =
{
.map = dw_pcie_own_conf_map,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
};
void dw_pcie_setup_rc(struct dw_pcie_port *port)
{
rt_uint32_t val, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_port(port);
/*
* Enable DBI read-only registers for writing/updating configuration.
* Write permission gets disabled towards the end of this function.
*/
dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
dw_pcie_setup(pci);
if (!port->ops->msi_host_init)
{
num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);
/* Initialize IRQ Status array */
for (int ctrl = 0; ctrl < num_ctrls; ++ctrl)
{
port->irq_mask[ctrl] = ~0;
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE), port->irq_mask[ctrl]);
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0);
}
}
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCIR_BAR(0), PCIM_BAR_MEM_TYPE_64);
dw_pcie_writel_dbi(pci, PCIR_BAR(1), PCIM_BAR_MEM_TYPE_32);
/* Setup interrupt pins */
val = dw_pcie_readl_dbi(pci, PCIR_INTLINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_dbi(pci, PCIR_INTLINE, val);
/* Setup bus numbers: primary = 0, secondary = 1, subordinate = 0xff */
val = dw_pcie_readl_dbi(pci, PCIR_PRIBUS_1);
val &= 0xff000000;
val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCIR_PRIBUS_1, val);
/* Setup command register */
val = dw_pcie_readl_dbi(pci, PCIR_COMMAND);
val &= 0xffff0000;
val |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN;
dw_pcie_writel_dbi(pci, PCIR_COMMAND, val);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pci->port.bridge->child_ops == &dw_child_pcie_ops)
{
int atu_idx = 0;
struct rt_pci_host_bridge *bridge = port->bridge;
/* Get last memory resource entry */
for (int i = 0; i < bridge->bus_regions_nr; ++i)
{
struct rt_pci_bus_region *region = &bridge->bus_regions[i];
if (region->flags != PCI_BUS_REGION_F_MEM)
{
continue;
}
if (pci->num_viewport <= ++atu_idx)
{
break;
}
dw_pcie_prog_outbound_atu(pci, atu_idx,
PCIE_ATU_TYPE_MEM, region->cpu_addr,
region->phy_addr, region->size);
}
if (port->io_size)
{
if (pci->num_viewport > ++atu_idx)
{
dw_pcie_prog_outbound_atu(pci, atu_idx,
PCIE_ATU_TYPE_IO, port->io_addr,
port->io_bus_addr, port->io_size);
}
else
{
pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
}
}
if (pci->num_viewport <= atu_idx)
{
LOG_W("Resources exceed number of ATU entries (%d)", pci->num_viewport);
}
}
dw_pcie_writel_dbi(pci, PCIR_BAR(0), 0);
/* Program correct class for RC */
dw_pcie_writew_dbi(pci, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
}

View File

@@ -0,0 +1,295 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "pcie.dw.platfrom"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "pcie-dw.h"
struct dw_dw_platform_pcie_soc_data
{
enum dw_pcie_device_mode mode;
};
struct dw_platform_pcie
{
struct dw_pcie *pci;
struct rt_syscon *regmap;
const struct dw_dw_platform_pcie_soc_data *soc_data;
};
static rt_err_t dw_platform_pcie_host_init(struct dw_pcie_port *port)
{
struct dw_pcie *pci = to_dw_pcie_from_port(port);
dw_pcie_setup_rc(port);
dw_pcie_wait_for_link(pci);
dw_pcie_msi_init(port);
return RT_EOK;
}
static void dw_platform_set_irq_count(struct dw_pcie_port *pp)
{
pp->irq_count = MAX_MSI_IRQS;
}
static const struct dw_pcie_host_ops dw_platform_pcie_host_ops =
{
.host_init = dw_platform_pcie_host_init,
.set_irq_count = dw_platform_set_irq_count,
};
static rt_err_t dw_platform_pcie_establish_link(struct dw_pcie *pci)
{
return RT_EOK;
}
static const struct dw_pcie_ops dw_platform_pcie_ops =
{
.start_link = dw_platform_pcie_establish_link,
};
static rt_err_t dw_platform_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
for (int bar = 0; bar < PCI_STD_NUM_BARS; ++bar)
{
dw_pcie_ep_reset_bar(pci, bar);
}
return RT_EOK;
}
static rt_err_t dw_platform_pcie_ep_raise_irq(struct dw_pcie_ep *ep,
rt_uint8_t func_no, enum rt_pci_ep_irq type, unsigned irq)
{
switch (type)
{
case RT_PCI_EP_IRQ_LEGACY:
return dw_pcie_ep_raise_legacy_irq(ep, func_no);
case RT_PCI_EP_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, irq);
case RT_PCI_EP_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, irq);
default:
LOG_E("Unknown IRQ type = %d", type);
}
return RT_EOK;
}
static const struct dw_pcie_ep_ops dw_platform_pcie_ep_ops =
{
.ep_init = dw_platform_pcie_ep_init,
.raise_irq = dw_platform_pcie_ep_raise_irq,
};
static rt_err_t dw_platform_add_pcie_port(struct dw_platform_pcie *plat_pcie,
struct rt_device *dev)
{
rt_err_t err;
struct dw_pcie *pci = plat_pcie->pci;
struct dw_pcie_port *port = &pci->port;
port->sys_irq = rt_dm_dev_get_irq(dev, 1);
if (port->sys_irq < 0)
{
return port->sys_irq;
}
#ifdef RT_PCI_MSI
port->msi_irq = rt_dm_dev_get_irq(dev, 0);
if (port->msi_irq < 0)
{
return port->msi_irq;
}
#endif
port->ops = &dw_platform_pcie_host_ops;
if ((err = dw_pcie_host_init(port)))
{
LOG_E("Failed to initialize host");
return err;
}
return RT_EOK;
}
static rt_err_t dw_platform_add_pcie_ep(struct dw_platform_pcie *plat_pcie,
struct rt_device *dev)
{
rt_err_t err;
struct dw_pcie *pci = plat_pcie->pci;
struct dw_pcie_ep *ep = &pci->endpoint;
pci->dbi_base2 = rt_dm_dev_iomap_by_name(dev, "dbi2");
if (!pci->dbi_base2)
{
return -RT_EIO;
}
err = rt_dm_dev_get_address_by_name(dev, "addr_space", &ep->aspace, &ep->aspace_size);
if (err)
{
rt_iounmap(pci->dbi_base2);
return err;
}
ep->ops = &dw_platform_pcie_ep_ops;
if ((err = dw_pcie_ep_init(ep)))
{
LOG_E("Failed to initialize endpoint");
return err;
}
return RT_EOK;
}
static rt_err_t dw_platform_pcie_probe(struct rt_platform_device *pdev)
{
rt_err_t err;
struct dw_pcie *pci = RT_NULL;
struct dw_platform_pcie *plat_pcie;
struct rt_device *dev = &pdev->parent;
if (!(plat_pcie = rt_calloc(1, sizeof(*plat_pcie))))
{
return -RT_ENOMEM;
}
if (!(pci = rt_calloc(1, sizeof(*pci))))
{
err = -RT_ENOMEM;
goto _fail;
}
plat_pcie->pci = pci;
plat_pcie->soc_data = pdev->id->data;
pci->dev = dev;
pci->ops = &dw_platform_pcie_ops;
pci->dbi_base = rt_dm_dev_iomap_by_name(dev, "dbi");
if (!pci->dbi_base)
{
err = -RT_EIO;
goto _fail;
}
dev->user_data = plat_pcie;
switch (plat_pcie->soc_data->mode)
{
case DW_PCIE_RC_TYPE:
if (!RT_KEY_ENABLED(RT_PCI_DW_HOST))
{
err = -RT_ENOSYS;
goto _fail;
}
if ((err = dw_platform_add_pcie_port(plat_pcie, dev)))
{
goto _fail;
}
break;
case DW_PCIE_EP_TYPE:
if (!RT_KEY_ENABLED(RT_PCI_DW_EP))
{
err = -RT_ENOSYS;
goto _fail;
}
if ((err = dw_platform_add_pcie_ep(plat_pcie, dev)))
{
goto _fail;
}
break;
default:
LOG_E("Invalid device type %d", plat_pcie->soc_data->mode);
err = -RT_EINVAL;
goto _fail;
}
return RT_EOK;
_fail:
if (pci)
{
if (pci->dbi_base)
{
rt_iounmap(pci->dbi_base);
}
rt_free(pci);
}
rt_free(plat_pcie);
return err;
}
static rt_err_t dw_platform_pcie_remove(struct rt_platform_device *pdev)
{
struct dw_platform_pcie *plat_pcie = pdev->parent.user_data;
rt_pci_host_bridge_remove(plat_pcie->pci->port.bridge);
dw_pcie_host_free(&plat_pcie->pci->port);
rt_iounmap(plat_pcie->pci->dbi_base);
rt_free(plat_pcie->pci);
rt_free(plat_pcie);
return RT_EOK;
}
static const struct dw_dw_platform_pcie_soc_data dw_platform_pcie_rc_soc_data =
{
.mode = DW_PCIE_RC_TYPE,
};
static const struct dw_dw_platform_pcie_soc_data dw_platform_pcie_ep_soc_data =
{
.mode = DW_PCIE_EP_TYPE,
};
static const struct rt_ofw_node_id dw_platform_pcie_ofw_ids[] =
{
{ .compatible = "snps,dw-pcie", .data = &dw_platform_pcie_rc_soc_data },
{ .compatible = "snps,dw-pcie-ep", .data = &dw_platform_pcie_ep_soc_data },
{ /* sentinel */ }
};
static struct rt_platform_driver dw_platform_pcie_driver =
{
.name = "dw-pcie",
.ids = dw_platform_pcie_ofw_ids,
.probe = dw_platform_pcie_probe,
.remove = dw_platform_pcie_remove,
};
RT_PLATFORM_DRIVER_EXPORT(dw_platform_pcie_driver);
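/*
 * Illustrative only: a device tree node this driver could bind against in RC
 * mode. Addresses, sizes and interrupt specifiers are placeholders made up
 * for the example; standard PCI host properties (such as ranges and
 * bus-range) are omitted. The driver expects reg-names "dbi" (plus "config"
 * for the host, or "dbi2"/"addr_space" for the endpoint), optional
 * num-viewport/max-link-speed properties, and num-ib-windows/num-ob-windows/
 * max-functions in EP mode.
 *
 *   pcie@40000000 {
 *       compatible = "snps,dw-pcie";
 *       reg = <0x40000000 0x100000>, <0x48000000 0x10000>;
 *       reg-names = "dbi", "config";
 *       num-viewport = <4>;
 *       max-link-speed = <3>;
 *       interrupts = <0 80 4>, <0 81 4>;  // index 0: MSI, index 1: system
 *       device_type = "pci";
 *   };
 */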

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include "../ecam.h"
rt_err_t pci_host_common_probe(struct rt_platform_device *pdev)
{
void *base;
rt_err_t err;
struct rt_device *dev = &pdev->parent;
struct pci_ecam_config_window *conf_win;
struct rt_pci_host_bridge *host_bridge = rt_pci_host_bridge_alloc(0);
if (!host_bridge)
{
return -RT_ENOMEM;
}
if (!(base = rt_dm_dev_iomap(dev, 0)))
{
err = -RT_EIO;
goto _fail;
}
host_bridge->parent.ofw_node = dev->ofw_node;
if ((err = rt_pci_host_bridge_init(host_bridge)))
{
goto _fail;
}
host_bridge->sysdata = conf_win = pci_ecam_create(host_bridge,
(const struct pci_ecam_ops *)pdev->id->data);
if (!conf_win)
{
err = -RT_ENOMEM;
goto _fail;
}
conf_win->win = base;
conf_win->priv = host_bridge;
if ((err = rt_pci_host_bridge_probe(host_bridge)))
{
goto _fail;
}
dev->user_data = host_bridge;
return RT_EOK;
_fail:
if (base)
{
rt_iounmap(base);
}
rt_pci_host_bridge_free(host_bridge);
return err;
}
rt_err_t pci_host_common_remove(struct rt_platform_device *pdev)
{
struct pci_ecam_config_window *conf_win;
struct rt_pci_host_bridge *host_bridge = pdev->parent.user_data;
rt_pci_host_bridge_remove(host_bridge);
conf_win = host_bridge->sysdata;
rt_iounmap(conf_win->win);
rt_pci_host_bridge_free(host_bridge);
return RT_EOK;
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include "../ecam.h"
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops =
{
.bus_shift = 16,
.pci_ops =
{
.map = pci_ecam_map,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};
static void *pci_dw_ecam_map_bus(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
{
struct pci_ecam_config_window *conf_win = bus->sysdata;
if (bus->number == conf_win->bus_range[0] && RT_PCI_SLOT(devfn) > 0)
{
return RT_NULL;
}
return pci_ecam_map(bus, devfn, where);
}
static const struct pci_ecam_ops pci_dw_ecam_bus_ops =
{
.pci_ops =
{
.map = pci_dw_ecam_map_bus,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};
static const struct rt_ofw_node_id gen_pci_ofw_ids[] =
{
{ .compatible = "pci-host-cam-generic", .data = &gen_pci_cfg_cam_bus_ops },
{ .compatible = "pci-host-ecam-generic", .data = &pci_generic_ecam_ops },
{ .compatible = "marvell,armada8k-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ .compatible = "socionext,synquacer-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ .compatible = "snps,dw-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ /* sentinel */ }
};
static struct rt_platform_driver gen_pci_driver =
{
.name = "pci-host-generic",
.ids = gen_pci_ofw_ids,
.probe = pci_host_common_probe,
.remove = pci_host_common_remove,
};
RT_PLATFORM_DRIVER_EXPORT(gen_pci_driver);

View File

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-07 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "pci.irq"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
void rt_pci_assign_irq(struct rt_pci_device *pdev)
{
int irq = 0;
rt_uint8_t pin, slot = -1;
struct rt_pci_host_bridge *host_bridge = rt_pci_find_host_bridge(pdev->bus);
if (!host_bridge->irq_map)
{
LOG_D("PCI-Device<%s> runtime IRQ mapping not provided by platform",
rt_dm_dev_get_name(&pdev->parent));
return;
}
/* Must try the swizzle when interrupt line passes through a P2P bridge */
rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin);
if (pin > RT_PCI_INTX_PIN_MAX)
{
pin = 1;
}
if (pin)
{
if (host_bridge->irq_slot)
{
slot = host_bridge->irq_slot(pdev, &pin);
}
/* Map IRQ */
if ((irq = host_bridge->irq_map(pdev, slot, pin)) == -1)
{
irq = 0;
}
}
pdev->irq = irq;
LOG_D("PCI-Device<%s> assign IRQ: got %d", rt_dm_dev_get_name(&pdev->parent), pdev->irq);
/* Save IRQ */
rt_pci_write_config_u8(pdev, PCIR_INTLINE, irq);
}

View File

@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_PCI_MSI']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = ['device.c', 'irq.c', 'msi.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci.h>
void rt_pci_msi_init(struct rt_pci_device *pdev)
{
if (pdev && (pdev->msi_cap = rt_pci_find_capability(pdev, PCIY_MSI)))
{
rt_uint16_t ctrl;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &ctrl);
if (ctrl & PCIM_MSICTRL_MSI_ENABLE)
{
rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, ctrl & ~PCIM_MSICTRL_MSI_ENABLE);
}
if (!(ctrl & PCIM_MSICTRL_64BIT))
{
pdev->no_64bit_msi = RT_TRUE;
}
}
}
void rt_pci_msix_init(struct rt_pci_device *pdev)
{
if (pdev && (pdev->msix_cap = rt_pci_find_capability(pdev, PCIY_MSIX)))
{
rt_uint16_t ctrl;
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &ctrl);
if (ctrl & PCIM_MSIXCTRL_MSIX_ENABLE)
{
rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE);
}
}
}

View File

@@ -0,0 +1,146 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci_msi.h>
#define DBG_TAG "pci.msi.irq"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static struct rt_spinlock msi_irq_map_lock = {};
static RT_BITMAP_DECLARE(msi_irq_map, MAX_HANDLERS) = {};
rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type)
{
int irq, index = 0, irq_nr = 0;
rt_err_t err = RT_EOK;
struct rt_pic_irq *pirq;
struct rt_pic *msi_pic;
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return -RT_EINVAL;
}
msi_pic = pdev->msi_pic;
if (type == PCIY_MSI)
{
int last_irq = -1, irq_idx;
rt_size_t irq_nr;
desc = rt_pci_msi_first_desc(pdev);
irq_nr = 1 << desc->msi.cap.multi_msg_use;
rt_hw_spin_lock(&msi_irq_map_lock.lock);
_retry:
for (int i = 0; i < irq_nr; ++i)
{
if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
{
err = irq;
LOG_E("Setup %s[%d] IRQ error = %s", "MSI", i, rt_strerror(err));
break;
}
if (last_irq >= 0 && last_irq + 1 != irq)
{
for (int idx = 0; idx < i; ++i, --last_irq)
{
rt_bitmap_set_bit(msi_irq_map, last_irq);
}
last_irq = irq;
goto _retry;
}
last_irq = irq;
}
if (!err)
{
/* Get the first irq */
desc->irq = irq - (irq_nr - 1);
}
rt_bitmap_for_each_set_bit(msi_irq_map, irq_idx, MAX_HANDLERS)
{
msi_pic->ops->irq_free_msi(msi_pic, irq_idx);
/* Free bit so the next user doesn't need to bzero */
rt_bitmap_clear_bit(msi_irq_map, irq_idx);
}
rt_hw_spin_unlock(&msi_irq_map_lock.lock);
if (!err)
{
for (int idx = 0; idx < nvec; ++idx)
{
pirq = rt_pic_find_pirq(msi_pic, irq + idx);
pirq->msi_desc = desc;
msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
rt_pci_msi_write_msg(desc, &desc->msg);
}
}
}
else if (type == PCIY_MSIX)
{
rt_pci_msi_for_each_desc(pdev, desc)
{
if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
{
err = irq;
LOG_E("Setup %s[%d] IRQ error = %s", "MSI-X",
desc->msix.index, rt_strerror(err));
break;
}
desc->irq = irq;
pirq = rt_pic_find_pirq(msi_pic, irq);
pirq->msi_desc = desc;
msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
rt_pci_msi_write_msg(desc, &desc->msg);
++irq_nr;
}
if (err)
{
rt_pci_msi_for_each_desc(pdev, desc)
{
if (index >= irq_nr)
{
break;
}
msi_pic->ops->irq_free_msi(msi_pic, desc->irq);
++index;
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}

View File

@@ -0,0 +1,949 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-07 GuEe-GUI first version
*/
#include <drivers/pci_msi.h>
#include <drivers/core/numa.h>
#define DBG_TAG "pci.msi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/* PCI has 2048 max IRQs in MSI-X */
static RT_IRQ_AFFINITY_DECLARE(msi_affinity_default[2048]) rt_section(".bss.noclean.pci.msi");
rt_inline void spin_lock(struct rt_spinlock *lock)
{
rt_hw_spin_lock(&lock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *lock)
{
rt_hw_spin_unlock(&lock->lock);
}
rt_inline void *msix_table_base(struct rt_pci_msix_conf *msix)
{
return msix->table_base + msix->index * PCIM_MSIX_ENTRY_SIZE;
}
rt_inline void *msix_vector_ctrl_base(struct rt_pci_msix_conf *msix)
{
return msix_table_base(msix) + PCIM_MSIX_ENTRY_VECTOR_CTRL;
}
rt_inline void msix_write_vector_ctrl(struct rt_pci_msix_conf *msix,
rt_uint32_t ctrl)
{
void *vc_addr = msix_vector_ctrl_base(msix);
HWREG32(vc_addr) = ctrl;
}
rt_inline void msix_mask(struct rt_pci_msix_conf *msix)
{
msix->msg_ctrl |= PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
msix_write_vector_ctrl(msix, msix->msg_ctrl);
/* Flush write to device */
HWREG32(msix->table_base);
}
static void msix_update_ctrl(struct rt_pci_device *pdev,
rt_uint16_t clear, rt_uint16_t set)
{
rt_uint16_t msgctl;
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
msgctl &= ~clear;
msgctl |= set;
rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, msgctl);
}
rt_inline void msix_unmask(struct rt_pci_msix_conf *msix)
{
msix->msg_ctrl &= ~PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
msix_write_vector_ctrl(msix, msix->msg_ctrl);
}
rt_inline rt_uint32_t msi_multi_mask(struct rt_pci_msi_conf *msi)
{
if (msi->cap.multi_msg_max >= 5)
{
return 0xffffffff;
}
return (1 << (1 << msi->cap.multi_msg_max)) - 1;
}
static void msi_write_mask(struct rt_pci_msi_conf *msi,
rt_uint32_t clear, rt_uint32_t set, struct rt_pci_device *pdev)
{
if (msi->cap.is_masking)
{
rt_ubase_t level = rt_spin_lock_irqsave(&pdev->msi_lock);
msi->mask &= ~clear;
msi->mask |= set;
rt_pci_write_config_u32(pdev, msi->mask_pos, msi->mask);
rt_spin_unlock_irqrestore(&pdev->msi_lock, level);
}
}
rt_inline void msi_mask(struct rt_pci_msi_conf *msi,
rt_uint32_t mask, struct rt_pci_device *pdev)
{
msi_write_mask(msi, 0, mask, pdev);
}
rt_inline void msi_unmask(struct rt_pci_msi_conf *msi,
rt_uint32_t mask, struct rt_pci_device *pdev)
{
msi_write_mask(msi, mask, 0, pdev);
}
static void msi_write_enable(struct rt_pci_device *pdev, rt_bool_t enable)
{
rt_uint16_t msgctl;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
msgctl &= ~PCIM_MSICTRL_MSI_ENABLE;
if (enable)
{
msgctl |= PCIM_MSICTRL_MSI_ENABLE;
}
rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, msgctl);
}
static void msi_affinity_init(struct rt_pci_msi_desc *desc, int msi_index,
rt_bitmap_t *cpumasks)
{
int irq;
struct rt_pic_irq *pirq;
struct rt_pci_device *pdev = desc->pdev;
struct rt_pic *msi_pic = pdev->msi_pic;
irq = desc->irq + desc->is_msix ? 0 : msi_index;
pirq = rt_pic_find_pirq(msi_pic, irq);
/* Save affinity */
if (desc->is_msix)
{
desc->affinity = pirq->affinity;
}
else
{
desc->affinities[msi_index] = pirq->affinity;
}
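/*
 * The caller passes a slot inside msi_affinity_default as a "derive it for
 * me" placeholder: in that case the affinity is computed from the NUMA node
 * that owns the MSI doorbell address. Any other bitmap is treated as an
 * explicit request, and an empty one means "leave the affinity as is".
 */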
if ((void *)cpumasks > (void *)msi_affinity_default &&
(void *)cpumasks < (void *)msi_affinity_default + sizeof(msi_affinity_default))
{
rt_uint64_t data_address;
/* Get MSI/MSI-X write data address */
data_address = desc->msg.address_hi;
data_address <<= 32;
data_address |= desc->msg.address_lo;
/* Prepare affinity */
cpumasks = pirq->affinity;
rt_numa_memory_affinity(data_address, cpumasks);
}
else if (rt_bitmap_next_set_bit(cpumasks, 0, RT_CPUS_NR) >= RT_CPUS_NR)
{
/* No affinity info found, give up */
return;
}
if (!rt_pic_irq_set_affinity(irq, cpumasks))
{
if (msi_pic->ops->irq_write_msi_msg)
{
msi_pic->ops->irq_write_msi_msg(pirq, &desc->msg);
}
}
}
void rt_pci_msi_shutdown(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return;
}
msi_write_enable(pdev, RT_FALSE);
rt_pci_intx(pdev, RT_TRUE);
if ((desc = rt_pci_msi_first_desc(pdev)))
{
msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
}
/* Restore pdev->irq to its default pin-assertion IRQ */
pdev->irq = desc->msi.default_irq;
pdev->msi_enabled = RT_FALSE;
}
void rt_pci_msix_shutdown(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return;
}
rt_pci_msi_for_each_desc(pdev, desc)
{
msix_mask(&desc->msix);
}
msix_update_ctrl(pdev, PCIM_MSIXCTRL_MSIX_ENABLE, 0);
rt_pci_intx(pdev, RT_TRUE);
pdev->msix_enabled = RT_FALSE;
}
void rt_pci_msi_free_irqs(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc, *last_desc = RT_NULL;
if (!pdev)
{
return;
}
if (pdev->msix_base)
{
rt_iounmap(pdev->msix_base);
pdev->msix_base = RT_NULL;
}
rt_pci_msi_for_each_desc(pdev, desc)
{
/* Defer freeing the previous node so the list iteration stays safe */
if (last_desc)
{
rt_list_remove(&last_desc->list);
rt_free(last_desc);
}
last_desc = desc;
}
/* The last one */
if (last_desc)
{
rt_list_remove(&last_desc->list);
rt_free(last_desc);
}
}
void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg)
{
struct rt_pci_device *pdev = desc->pdev;
if (desc->is_msix)
{
void *msix_entry;
rt_bool_t unmasked;
rt_uint32_t msgctl;
struct rt_pci_msix_conf *msix = &desc->msix;
msgctl = msix->msg_ctrl;
unmasked = !(msgctl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
msix_entry = msix_table_base(msix);
if (unmasked)
{
msix_write_vector_ctrl(msix, msgctl | PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
}
HWREG32(msix_entry + PCIM_MSIX_ENTRY_LOWER_ADDR) = msg->address_lo;
HWREG32(msix_entry + PCIM_MSIX_ENTRY_UPPER_ADDR) = msg->address_hi;
HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA) = msg->data;
if (unmasked)
{
msix_write_vector_ctrl(msix, msgctl);
}
/* Ensure that the writes are visible in the device */
HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA);
}
else
{
rt_uint16_t msgctl;
int pos = pdev->msi_cap;
struct rt_pci_msi_conf *msi = &desc->msi;
rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
msgctl &= ~PCIM_MSICTRL_MME_MASK;
msgctl |= msi->cap.multi_msg_use << PCIM_MSICTRL_MME_SHIFT;
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_CTRL, msgctl);
rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR, msg->address_lo);
/*
 * The value written to the Message Data register is processor specific:
 * system software programs it when the PCIe device is initialized, and the
 * encoding rules differ between processor architectures.
 * If the Multiple Message Enable field is not 0b000 (i.e. several vectors
 * are enabled), the device signals different interrupt requests by varying
 * the low-order bits of the Message Data value.
 */
if (msi->cap.is_64bit)
{
rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR_HIGH, msg->address_hi);
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA_64BIT, msg->data);
}
else
{
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA, msg->data);
}
/* Ensure that the writes are visible in the device */
rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
}
desc->msg = *msg;
if (desc->write_msi_msg)
{
desc->write_msi_msg(desc, desc->write_msi_msg_data);
}
}
void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq)
{
struct rt_pci_msi_desc *desc;
if (pirq && (desc = pirq->msi_desc))
{
if (desc->is_msix)
{
msix_mask(&desc->msix);
}
else
{
msi_mask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
}
}
}
void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq)
{
struct rt_pci_msi_desc *desc;
if (pirq && (desc = pirq->msi_desc))
{
if (desc->is_msix)
{
msix_unmask(&desc->msix);
}
else
{
msi_unmask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
}
}
}
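/*
 * rt_pci_alloc_vector() below tries the requested mechanisms in order of
 * preference: MSI-X first, then MSI, and finally the shared legacy INTx
 * line, for which only the CPU affinity is widened before INTx is
 * re-enabled.
 */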
rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_ssize_t res = -RT_ENOSYS;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (flags & RT_PCI_IRQ_F_AFFINITY)
{
if (!affinities)
{
affinities = msi_affinity_default;
}
}
else
{
affinities = RT_NULL;
}
if (flags & RT_PCI_IRQ_F_MSIX)
{
res = rt_pci_msix_enable_range_affinity(pdev, RT_NULL, min, max, affinities);
if (res > 0)
{
return res;
}
}
if (flags & RT_PCI_IRQ_F_MSI)
{
res = rt_pci_msi_enable_range_affinity(pdev, min, max, affinities);
if (res > 0)
{
return res;
}
}
if (flags & RT_PCI_IRQ_F_LEGACY)
{
if (min == 1 && pdev->irq >= 0)
{
if (affinities)
{
int cpuid;
RT_IRQ_AFFINITY_DECLARE(old_affinity);
/* INTx is shared, we should update it */
rt_pic_irq_get_affinity(pdev->irq, old_affinity);
rt_bitmap_for_each_set_bit(affinities[0], cpuid, RT_CPUS_NR)
{
RT_IRQ_AFFINITY_SET(old_affinity, cpuid);
}
rt_pic_irq_set_affinity(pdev->irq, old_affinity);
}
rt_pci_intx(pdev, RT_TRUE);
return min;
}
}
return res;
}
void rt_pci_free_vector(struct rt_pci_device *pdev)
{
if (!pdev)
{
return;
}
rt_pci_msi_disable(pdev);
rt_pci_msix_disable(pdev);
rt_pci_irq_mask(pdev);
}
static rt_err_t msi_verify_entries(struct rt_pci_device *pdev)
{
if (pdev->no_64bit_msi)
{
struct rt_pci_msi_desc *desc;
rt_pci_msi_for_each_desc(pdev, desc)
{
if (desc->msg.address_hi)
{
LOG_D("%s: Arch assigned 64-bit MSI address %08x%08x"
"but device only supports 32 bits",
rt_dm_dev_get_name(&pdev->parent),
desc->msg.address_hi, desc->msg.address_lo);
return -RT_EIO;
}
}
}
return RT_EOK;
}
static rt_err_t msi_insert_desc(struct rt_pci_device *pdev,
struct rt_pci_msi_desc *init_desc)
{
rt_size_t msi_affinity_ptr_size = 0;
struct rt_pci_msi_desc *msi_desc;
if (!init_desc->is_msix)
{
msi_affinity_ptr_size += sizeof(msi_desc->affinities[0]) * 32;
}
msi_desc = rt_calloc(1, sizeof(*msi_desc) + msi_affinity_ptr_size);
if (!msi_desc)
{
return -RT_ENOMEM;
}
rt_memcpy(msi_desc, init_desc, sizeof(*msi_desc));
if (!init_desc->is_msix)
{
msi_desc->affinities = (void *)msi_desc + sizeof(*msi_desc);
}
msi_desc->pdev = pdev;
rt_list_init(&msi_desc->list);
rt_list_insert_before(&pdev->msi_desc_nodes, &msi_desc->list);
return RT_EOK;
}
rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
rt_uint16_t msgctl;
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msi_cap)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
return 1 << ((msgctl & PCIM_MSICTRL_MMC_MASK) >> 1);
}
rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msi_enabled)
{
return -RT_EINVAL;
}
spin_lock(&pdev->msi_lock);
rt_pci_msi_shutdown(pdev);
rt_pci_msi_free_irqs(pdev);
spin_unlock(&pdev->msi_lock);
return RT_EOK;
}
static rt_err_t msi_setup_msi_desc(struct rt_pci_device *pdev, int nvec)
{
rt_uint16_t msgctl;
struct rt_pci_msi_desc desc;
rt_memset(&desc, 0, sizeof(desc));
desc.vector_used = nvec;
desc.vector_count = rt_pci_msi_vector_count(pdev);
desc.is_msix = RT_FALSE;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
desc.msi.cap.is_64bit = !!(msgctl & PCIM_MSICTRL_64BIT);
desc.msi.cap.is_masking = !!(msgctl & PCIM_MSICTRL_VECTOR);
desc.msi.cap.multi_msg_max = (msgctl & PCIM_MSICTRL_MMC_MASK) >> 1;
for (int log2 = 0; log2 < 5; ++log2)
{
if (nvec <= (1 << log2))
{
desc.msi.cap.multi_msg_use = log2;
break;
}
}
if (desc.msi.cap.is_64bit)
{
desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK_64BIT;
}
else
{
desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK;
}
/* Save pdev->irq for its default pin-assertion IRQ */
desc.msi.default_irq = pdev->irq;
if (desc.msi.cap.is_masking)
{
/* Get the old mask status */
rt_pci_read_config_u32(pdev, desc.msi.mask_pos, &desc.msi.mask);
}
return msi_insert_desc(pdev, &desc);
}
static rt_ssize_t msi_capability_init(struct rt_pci_device *pdev,
int nvec, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_err_t err;
struct rt_pci_msi_desc *desc;
msi_write_enable(pdev, RT_FALSE);
spin_lock(&pdev->msi_lock);
if (!(err = msi_setup_msi_desc(pdev, nvec)))
{
/* All MSIs are unmasked by default; mask them all */
desc = rt_pci_msi_first_desc(pdev);
msi_mask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSI)))
{
err = msi_verify_entries(pdev);
}
if (err)
{
msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
}
}
spin_unlock(&pdev->msi_lock);
if (err)
{
rt_pci_msi_free_irqs(pdev);
LOG_E("%s: Setup %s interrupts(%d) error = %s",
rt_dm_dev_get_name(&pdev->parent), "MSI", nvec, rt_strerror(err));
return err;
}
if (affinities)
{
for (int idx = 0; idx < nvec; ++idx)
{
msi_affinity_init(desc, idx, affinities[idx]);
}
}
/* Disable INTX */
rt_pci_intx(pdev, RT_FALSE);
/* Set MSI enabled bits */
msi_write_enable(pdev, RT_TRUE);
pdev->irq = desc->irq;
pdev->msi_enabled = RT_TRUE;
return nvec;
}
rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
int nvec = max;
rt_ssize_t entries_nr;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (pdev->no_msi)
{
return -RT_ENOSYS;
}
if (!pdev->msi_pic)
{
return -RT_ENOSYS;
}
if (pdev->msi_enabled)
{
LOG_W("%s: MSI is enabled", rt_dm_dev_get_name(&pdev->parent));
return -RT_EINVAL;
}
entries_nr = rt_pci_msi_vector_count(pdev);
if (entries_nr < 0)
{
return entries_nr;
}
if (nvec > entries_nr)
{
return -RT_EEMPTY;
}
return msi_capability_init(pdev, nvec, affinities);
}
rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
rt_uint16_t msgctl;
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msix_cap)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
return rt_pci_msix_table_size(msgctl);
}
rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msix_enabled)
{
return -RT_EINVAL;
}
spin_lock(&pdev->msi_lock);
rt_pci_msix_shutdown(pdev);
rt_pci_msi_free_irqs(pdev);
spin_unlock(&pdev->msi_lock);
return RT_EOK;
}
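/*
 * The MSI-X table location is described by the capability's TABLE register:
 * the low bits select which BAR (BIR) holds the table and the remaining bits
 * give the offset inside that BAR, so the helper below resolves the BAR's
 * physical base and ioremaps just enough space for every entry.
 */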
static void *msix_table_remap(struct rt_pci_device *pdev, rt_size_t entries_nr)
{
rt_uint8_t bir;
rt_uint32_t table_offset;
rt_ubase_t table_base_phys;
rt_pci_read_config_u32(pdev, pdev->msix_cap + PCIR_MSIX_TABLE, &table_offset);
bir = (rt_uint8_t)(table_offset & PCIM_MSIX_BIR_MASK);
if (pdev->resource[bir].flags & PCI_BUS_REGION_F_NONE)
{
LOG_E("%s: BAR[bir = %d] is invalid", rt_dm_dev_get_name(&pdev->parent), bir);
return RT_NULL;
}
table_base_phys = pdev->resource[bir].base + (table_offset & ~PCIM_MSIX_BIR_MASK);
return rt_ioremap((void *)table_base_phys, entries_nr * PCIM_MSIX_ENTRY_SIZE);
}
static rt_err_t msix_setup_msi_descs(struct rt_pci_device *pdev,
void *table_base, struct rt_pci_msix_entry *entries, int nvec)
{
rt_err_t err = RT_EOK;
struct rt_pci_msi_desc desc;
rt_memset(&desc, 0, sizeof(desc));
desc.vector_used = 1;
desc.vector_count = rt_pci_msix_vector_count(pdev);
desc.is_msix = RT_TRUE;
desc.msix.table_base = table_base;
for (int i = 0; i < nvec; ++i)
{
void *table_entry;
int index = entries ? entries[i].index : i;
desc.msix.index = index;
table_entry = msix_table_base(&desc.msix);
desc.msix.msg_ctrl = HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL);
if ((err = msi_insert_desc(pdev, &desc)))
{
break;
}
}
return err;
}
static rt_ssize_t msix_capability_init(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int nvec,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_err_t err;
rt_uint16_t msgctl;
rt_size_t table_size;
void *table_base, *table_entry;
struct rt_pci_msi_desc *desc;
struct rt_pci_msix_entry *entry;
/*
* Some devices require MSI-X to be enabled before the MSI-X
* registers can be accessed.
* Mask all the vectors to prevent interrupts coming in before
* they're fully set up.
*/
msix_update_ctrl(pdev, 0, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE);
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
/* Request & Map MSI-X table region */
table_size = rt_pci_msix_table_size(msgctl);
table_base = msix_table_remap(pdev, table_size);
if (!table_base)
{
LOG_E("%s: Remap MSI-X table fail", rt_dm_dev_get_name(&pdev->parent));
err = -RT_ENOMEM;
goto _out_disable_msix;
}
pdev->msix_base = table_base;
spin_lock(&pdev->msi_lock);
if (!(err = msix_setup_msi_descs(pdev, table_base, entries, nvec)))
{
if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSIX)))
{
/* Check if all MSI entries honor device restrictions */
err = msi_verify_entries(pdev);
}
}
spin_unlock(&pdev->msi_lock);
if (err)
{
rt_pci_msi_free_irqs(pdev);
LOG_E("%s: Setup %s interrupts(%d) error = %s",
rt_dm_dev_get_name(&pdev->parent), "MSI-X", nvec, rt_strerror(err));
goto _out_disable_msix;
}
entry = entries;
rt_pci_msi_for_each_desc(pdev, desc)
{
if (affinities)
{
msi_affinity_init(desc, desc->msix.index, affinities[entry->index]);
}
entry->irq = desc->irq;
++entry;
}
/* Disable INTX */
rt_pci_intx(pdev, RT_FALSE);
/* Mask all table entries */
table_entry = table_base;
for (int i = 0; i < table_size; ++i, table_entry += PCIM_MSIX_ENTRY_SIZE)
{
HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL) = PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
}
msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK, 0);
pdev->msix_enabled = RT_TRUE;
return nvec;
_out_disable_msix:
msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE, 0);
return err;
}
rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
int nvec = max;
rt_ssize_t entries_nr;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (pdev->no_msi)
{
return -RT_ENOSYS;
}
if (!pdev->msi_pic)
{
return -RT_ENOSYS;
}
if (pdev->msix_enabled)
{
LOG_W("%s: MSI-X is enabled", rt_dm_dev_get_name(&pdev->parent));
return -RT_EINVAL;
}
entries_nr = rt_pci_msix_vector_count(pdev);
if (entries_nr < 0)
{
return entries_nr;
}
if (nvec > entries_nr)
{
return -RT_EEMPTY;
}
if (!entries)
{
return 0;
}
/* Check if entries is valid */
for (int i = 0; i < nvec; ++i)
{
struct rt_pci_msix_entry *target = &entries[i];
if (target->index >= entries_nr)
{
return -RT_EINVAL;
}
for (int j = i + 1; j < nvec; ++j)
{
/* Check duplicate */
if (target->index == entries[j].index)
{
LOG_E("%s: msix entry[%d].index = entry[%d].index",
rt_dm_dev_get_name(&pdev->parent), i, j);
return -RT_EINVAL;
}
}
}
return msix_capability_init(pdev, entries, nvec, affinities);
}

View File

@@ -0,0 +1,609 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "pci.ofw"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
#include <drivers/ofw.h>
#include <drivers/ofw_io.h>
#include <drivers/ofw_irq.h>
#include <drivers/ofw_fdt.h>
static rt_err_t pci_ofw_irq_parse(struct rt_pci_device *pdev, struct rt_ofw_cell_args *out_irq)
{
rt_err_t err = RT_EOK;
rt_uint8_t pin;
fdt32_t map_addr[4];
struct rt_pci_device *p2pdev;
struct rt_ofw_node *dev_np, *p2pnode = RT_NULL;
/* Parse device tree if dev have a device node */
dev_np = pdev->parent.ofw_node;
if (dev_np)
{
err = rt_ofw_parse_irq_cells(dev_np, 0, out_irq);
if (!err)
{
/* The device node carries its own interrupt specifier, use it directly */
return err;
}
}
/* Assume #interrupt-cells is 1 */
if ((err = rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin)))
{
goto _err;
}
/* No pin, exit with no error message. */
if (pin == 0)
{
return -RT_ENOSYS;
}
/* Try local interrupt-map in the device node */
if (rt_ofw_prop_read_raw(dev_np, "interrupt-map", RT_NULL))
{
pin = rt_pci_irq_intx(pdev, pin);
p2pnode = dev_np;
}
/* Walk up the PCI tree */
while (!p2pnode)
{
p2pdev = pdev->bus->self;
/* Is the root bus -> host bridge */
if (rt_pci_is_root_bus(pdev->bus))
{
struct rt_pci_host_bridge *host_bridge = pdev->bus->host_bridge;
p2pnode = host_bridge->parent.ofw_node;
if (!p2pnode)
{
err = -RT_EINVAL;
goto _err;
}
}
else
{
/* Is P2P bridge */
p2pnode = p2pdev->parent.ofw_node;
}
if (p2pnode)
{
break;
}
/* Try get INTx in P2P */
pin = rt_pci_irq_intx(pdev, pin);
pdev = p2pdev;
}
/* For more format detail, please read `components/drivers/ofw/irq.c:ofw_parse_irq_map` */
out_irq->data = map_addr;
out_irq->args_count = 2;
out_irq->args[0] = 3;
out_irq->args[1] = 1;
/* In addr cells */
map_addr[0] = cpu_to_fdt32((pdev->bus->number << 16) | (pdev->devfn << 8));
map_addr[1] = cpu_to_fdt32(0);
map_addr[2] = cpu_to_fdt32(0);
/* In pin cells */
map_addr[3] = cpu_to_fdt32(pin);
err = rt_ofw_parse_irq_map(p2pnode, out_irq);
_err:
if (err == -RT_EEMPTY)
{
LOG_W("PCI-Device<%s> no interrupt-map found, INTx interrupts not available",
rt_dm_dev_get_name(&pdev->parent));
LOG_W("PCI-Device<%s> possibly some PCI slots don't have level triggered interrupts capability",
rt_dm_dev_get_name(&pdev->parent));
}
else if (err && err != -RT_ENOSYS)
{
LOG_E("PCI-Device<%s> irq parse failed with err = %s",
rt_dm_dev_get_name(&pdev->parent), rt_strerror(err));
}
return err;
}
int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin)
{
int irq = -1;
rt_err_t status;
struct rt_ofw_cell_args irq_args;
if (!pdev)
{
goto _end;
}
status = pci_ofw_irq_parse(pdev, &irq_args);
if (status)
{
goto _end;
}
irq = rt_ofw_map_irq(&irq_args);
if (irq >= 0)
{
pdev->intx_pic = rt_pic_dynamic_cast(rt_ofw_data(irq_args.data));
}
_end:
return irq;
}
static rt_err_t pci_ofw_parse_ranges(struct rt_ofw_node *dev_np, const char *propname,
int phy_addr_cells, int phy_size_cells, int cpu_addr_cells,
struct rt_pci_bus_region **out_regions, rt_size_t *out_regions_nr)
{
const fdt32_t *cell;
rt_ssize_t total_cells;
int groups, space_code;
rt_uint32_t phy_addr[3];
rt_uint64_t cpu_addr, phy_addr_size;
*out_regions = RT_NULL;
*out_regions_nr = 0;
cell = rt_ofw_prop_read_raw(dev_np, propname, &total_cells);
if (!cell)
{
return -RT_EEMPTY;
}
groups = total_cells / sizeof(*cell) / (phy_addr_cells + phy_size_cells + cpu_addr_cells);
*out_regions = rt_malloc(groups * sizeof(struct rt_pci_bus_region));
if (!*out_regions)
{
return -RT_ENOMEM;
}
for (int i = 0; i < groups; ++i)
{
/*
* ranges:
* phys.hi  cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
* phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
* phys.lo  cell: llllllll llllllll llllllll llllllll
*
* n: relocatable region flag (doesn't play a role here)
* p: prefetchable (cacheable) region flag
* t: aliased address flag (doesn't play a role here)
* ss: space code
* 00: configuration space
* 01: I/O space
* 10: 32 bit memory space
* 11: 64 bit memory space
* bbbbbbbb: The PCI bus number
* ddddd: The device number
* fff: The function number. Used for multifunction PCI devices.
* rrrrrrrr: Register number; used for configuration cycles.
*/
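/*
 * Worked example (hypothetical values): with #address-cells = 3,
 * #size-cells = 2 and a 2-cell CPU address, a ranges entry of
 *   <0x02000000 0x0 0x40000000  0x0 0x40000000  0x0 0x10000000>
 * decodes as phys.hi = 0x02000000 -> ss = 10 (32-bit memory space),
 * p = 0 (non-prefetchable), PCI bus address = 0x40000000,
 * CPU address = 0x40000000, size = 0x10000000 (256MiB).
 */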
for (int j = 0; j < phy_addr_cells; ++j)
{
phy_addr[j] = rt_fdt_read_number(cell++, 1);
}
space_code = (phy_addr[0] >> 24) & 0x3;
cpu_addr = rt_fdt_read_number(cell, cpu_addr_cells);
cell += cpu_addr_cells;
phy_addr_size = rt_fdt_read_number(cell, phy_size_cells);
cell += phy_size_cells;
(*out_regions)[i].phy_addr = ((rt_uint64_t)phy_addr[1] << 32) | phy_addr[2];
(*out_regions)[i].cpu_addr = cpu_addr;
(*out_regions)[i].size = phy_addr_size;
(*out_regions)[i].bus_start = (*out_regions)[i].phy_addr;
if (space_code & 2)
{
(*out_regions)[i].flags = phy_addr[0] & (1U << 30) ?
PCI_BUS_REGION_F_PREFETCH : PCI_BUS_REGION_F_MEM;
}
else if (space_code & 1)
{
(*out_regions)[i].flags = PCI_BUS_REGION_F_IO;
}
else
{
(*out_regions)[i].flags = PCI_BUS_REGION_F_NONE;
}
++*out_regions_nr;
}
return RT_EOK;
}
rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
int phy_addr_cells = -1, phy_size_cells = -1, cpu_addr_cells;
if (!dev_np || !host_bridge)
{
return -RT_EINVAL;
}
cpu_addr_cells = rt_ofw_io_addr_cells(dev_np);
rt_ofw_prop_read_s32(dev_np, "#address-cells", &phy_addr_cells);
rt_ofw_prop_read_s32(dev_np, "#size-cells", &phy_size_cells);
if (phy_addr_cells != 3 || phy_size_cells < 1 || cpu_addr_cells < 1)
{
return -RT_EINVAL;
}
if (pci_ofw_parse_ranges(dev_np, "ranges",
phy_addr_cells, phy_size_cells, cpu_addr_cells,
&host_bridge->bus_regions, &host_bridge->bus_regions_nr))
{
return -RT_EINVAL;
}
if ((err = rt_pci_region_setup(host_bridge)))
{
rt_free(host_bridge->bus_regions);
host_bridge->bus_regions_nr = 0;
return err;
}
err = pci_ofw_parse_ranges(dev_np, "dma-ranges",
phy_addr_cells, phy_size_cells, cpu_addr_cells,
&host_bridge->dma_regions, &host_bridge->dma_regions_nr);
if (err && err != -RT_EEMPTY)
{
rt_free(host_bridge->bus_regions);
host_bridge->bus_regions_nr = 0;
LOG_E("%s: Read dma-ranges error = %s", rt_ofw_node_full_name(dev_np),
rt_strerror(err));
return err;
}
return RT_EOK;
}
rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
const char *propname;
if (!dev_np || !host_bridge)
{
return -RT_EINVAL;
}
host_bridge->irq_slot = rt_pci_irq_slot;
host_bridge->irq_map = rt_pci_ofw_irq_parse_and_map;
if (rt_ofw_prop_read_u32_array_index(dev_np, "bus-range", 0, 2, host_bridge->bus_range) < 0)
{
return -RT_EIO;
}
propname = rt_ofw_get_prop_fuzzy_name(dev_np, ",pci-domain$");
rt_ofw_prop_read_u32(dev_np, propname, &host_bridge->domain);
err = rt_pci_ofw_parse_ranges(dev_np, host_bridge);
return err;
}
rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
return err;
}
rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
return err;
}
/*
* RID (Requester ID) is formatted such that:
* Bits [15:8] are the Bus number.
* Bits [7:3] are the Device number.
* Bits [2:0] are the Function number.
*
* msi-map: Maps a Requester ID to an MSI controller and associated
* msi-specifier data. The property is an arbitrary number of tuples of
* (rid-base,msi-controller,msi-base,length), where:
*
* - rid-base is a single cell describing the first RID matched by the entry.
*
* - msi-controller is a single phandle to an MSI controller
*
* - msi-base is an msi-specifier describing the msi-specifier produced for
* the first RID matched by the entry.
*
* - length is a single cell describing how many consecutive RIDs are matched
* following the rid-base.
*
* Any RID r in the interval [rid-base, rid-base + length) is associated with
* the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
*
* msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
* to an msi-specifier per the msi-map property.
*
* msi-parent: Describes the MSI parent of the root complex itself. Where
* the root complex and MSI controller do not pass sideband data with MSI
* writes, this property may be used to describe the MSI controller(s)
* used by PCI devices under the root complex, if defined as such in the
* binding for the root complex.
*
* / {
* #address-cells = <1>;
* #size-cells = <1>;
*
* msi_a: msi-controller@a {
* reg = <0xa 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* msi_b: msi-controller@b {
* reg = <0xb 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* msi_c: msi-controller@c {
* reg = <0xc 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* Example (1)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, identity-mapped.
* msi-map = <0x0 &msi_a 0x0 0x10000>;
* };
*
* Example (2)
* ===========
* pci: pci@ff {
* reg = <0xff 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, masked to only the device and function bits.
* msi-map = <0x0 &msi_a 0x0 0x100>;
* msi-map-mask = <0xff>
* };
*
* Example (3)
* ===========
* pci: pci@fff {
* reg = <0xfff 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, but the high bit of the bus number is ignored.
* msi-map = <0x0000 &msi_a 0x0000 0x8000>,
* <0x8000 &msi_a 0x0000 0x8000>;
* };
*
* Example (4)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, but the high bit of the bus number is negated.
* msi-map = <0x0000 &msi 0x8000 0x8000>,
* <0x8000 &msi 0x0000 0x8000>;
* };
*
* Example (5)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to MSI controller a is the
* // RID, but the high bit of the bus number is negated.
* // The sideband data provided to MSI controller b is the
* // RID, identity-mapped.
* // MSI controller c is not addressable.
* msi-map = <0x0000 &msi_a 0x8000 0x08000>,
* <0x8000 &msi_a 0x0000 0x08000>,
* <0x0000 &msi_b 0x0000 0x10000>;
* };
* };
*/
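/*
 * Worked example (hypothetical device): a function at bus 0x02, device 0x03,
 * function 1 has RID = (0x02 << 8) | (0x03 << 3) | 0x1 = 0x0219.
 * With "msi-map-mask = <0xff>" only the devfn bits (0x19) are used for the
 * msi-map lookup; without a mask the full RID 0x0219 is matched.
 */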
static void ofw_msi_pic_init(struct rt_pci_device *pdev)
{
#ifdef RT_PCI_MSI
rt_uint32_t rid;
struct rt_pci_host_bridge *bridge;
struct rt_ofw_node *np, *msi_ic_np = RT_NULL;
/*
* NOTE: Typically, a device's RID is equal to the PCI device's ID.
* However, in complex bus management scenarios such as servers and PCs,
* the RID needs to be associated with DMA. In these cases,
* the RID should be equal to the DMA alias assigned to the
* PCI device by the system bus.
*/
rid = rt_pci_dev_id(pdev);
bridge = rt_pci_find_host_bridge(pdev->bus);
RT_ASSERT(bridge != RT_NULL);
np = bridge->parent.ofw_node;
if (!(msi_ic_np = rt_ofw_parse_phandle(np, "msi-parent", 0)))
{
rt_ofw_map_id(np, rid, "msi-map", "msi-map-mask", &msi_ic_np, RT_NULL);
}
if (!msi_ic_np)
{
LOG_W("%s: MSI PIC not found", rt_dm_dev_get_name(&pdev->parent));
return;
}
pdev->msi_pic = rt_pic_dynamic_cast(rt_ofw_data(msi_ic_np));
if (!pdev->msi_pic)
{
LOG_W("%s: '%s' not supported", rt_dm_dev_get_name(&pdev->parent), "msi-parent");
goto _out_put_msi_parent_node;
}
if (!pdev->msi_pic->ops->irq_compose_msi_msg)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_compose_msi_msg");
RT_ASSERT(0);
}
if (!pdev->msi_pic->ops->irq_alloc_msi)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_alloc_msi");
RT_ASSERT(0);
}
if (!pdev->msi_pic->ops->irq_free_msi)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_free_msi");
RT_ASSERT(0);
}
_out_put_msi_parent_node:
rt_ofw_node_put(msi_ic_np);
#endif
}
static rt_int32_t ofw_pci_devfn(struct rt_ofw_node *np)
{
rt_int32_t res;
rt_uint32_t reg[5];
res = rt_ofw_prop_read_u32_array_index(np, "reg", 0, RT_ARRAY_SIZE(reg), reg);
return res > 0 ? ((reg[0] >> 8) & 0xff) : res;
}
static struct rt_ofw_node *ofw_find_device(struct rt_ofw_node *np, rt_uint32_t devfn)
{
struct rt_ofw_node *dev_np, *mfd_np;
rt_ofw_foreach_child_node(np, dev_np)
{
if (ofw_pci_devfn(dev_np) == devfn)
{
return dev_np;
}
if (rt_ofw_node_tag_equ(dev_np, "multifunc-device"))
{
rt_ofw_foreach_child_node(dev_np, mfd_np)
{
if (ofw_pci_devfn(mfd_np) == devfn)
{
rt_ofw_node_put(dev_np);
return mfd_np;
}
}
}
}
return RT_NULL;
}
rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
{
struct rt_ofw_node *np = RT_NULL;
if (!pdev)
{
return -RT_EINVAL;
}
ofw_msi_pic_init(pdev);
if (rt_pci_is_root_bus(pdev->bus) || !pdev->bus->self)
{
struct rt_pci_host_bridge *host_bridge;
host_bridge = rt_pci_find_host_bridge(pdev->bus);
RT_ASSERT(host_bridge != RT_NULL);
np = host_bridge->parent.ofw_node;
}
else
{
np = pdev->bus->self->parent.ofw_node;
}
if (np)
{
pdev->parent.ofw_node = ofw_find_device(np, pdev->devfn);
}
return RT_EOK;
}
rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
rt_ofw_node_put(pdev->parent.ofw_node);
return RT_EOK;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,272 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __PCI_IDS_H__
#define __PCI_IDS_H__
#define PCI_VENDOR_ID_LOONGSON 0x0014
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_VENDOR_ID_DYNALINK 0x0675
#define PCI_VENDOR_ID_UBIQUITI 0x0777
#define PCI_VENDOR_ID_BERKOM 0x0871
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_VENDOR_ID_NCR 0x1000
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_VLSI 0x1004
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_VENDOR_ID_NS 0x100b
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_VENDOR_ID_WEITEK 0x100e
#define PCI_VENDOR_ID_DEC 0x1011
#define PCI_VENDOR_ID_CIRRUS 0x1013
#define PCI_VENDOR_ID_IBM 0x1014
#define PCI_VENDOR_ID_UNISYS 0x1018
#define PCI_VENDOR_ID_COMPEX2 0x101a
#define PCI_VENDOR_ID_WD 0x101c
#define PCI_VENDOR_ID_AMI 0x101e
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_VENDOR_ID_AI 0x1025
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_VENDOR_ID_MATROX 0x102b
#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
#define PCI_VENDOR_ID_CT 0x102c
#define PCI_VENDOR_ID_MIRO 0x1031
#define PCI_VENDOR_ID_NEC 0x1033
#define PCI_VENDOR_ID_FD 0x1036
#define PCI_VENDOR_ID_SI 0x1039
#define PCI_VENDOR_ID_HP 0x103c
#define PCI_VENDOR_ID_PCTECH 0x1042
#define PCI_VENDOR_ID_ASUSTEK 0x1043
#define PCI_VENDOR_ID_DPT 0x1044
#define PCI_VENDOR_ID_OPTI 0x1045
#define PCI_VENDOR_ID_ELSA 0x1048
#define PCI_VENDOR_ID_STMICRO 0x104a
#define PCI_VENDOR_ID_BUSLOGIC 0x104b
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_VENDOR_ID_SONY 0x104d
#define PCI_VENDOR_ID_ANIGMA 0x1051
#define PCI_VENDOR_ID_EFAR 0x1055
#define PCI_VENDOR_ID_MOTOROLA 0x1057
#define PCI_VENDOR_ID_PROMISE 0x105a
#define PCI_VENDOR_ID_FOXCONN 0x105b
#define PCI_VENDOR_ID_UMC 0x1060
#define PCI_VENDOR_ID_PICOPOWER 0x1066
#define PCI_VENDOR_ID_MYLEX 0x1069
#define PCI_VENDOR_ID_APPLE 0x106b
#define PCI_VENDOR_ID_YAMAHA 0x1073
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_VENDOR_ID_CYRIX 0x1078
#define PCI_VENDOR_ID_CONTAQ 0x1080
#define PCI_VENDOR_ID_OLICOM 0x108d
#define PCI_VENDOR_ID_SUN 0x108e
#define PCI_VENDOR_ID_NI 0x1093
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_VENDOR_ID_BROOKTREE 0x109e
#define PCI_VENDOR_ID_SGI 0x10a9
#define PCI_VENDOR_ID_WINBOND 0x10ad
#define PCI_VENDOR_ID_PLX 0x10b5
#define PCI_VENDOR_ID_MADGE 0x10b6
#define PCI_VENDOR_ID_3COM 0x10b7
#define PCI_VENDOR_ID_AL 0x10b9
#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
#define PCI_VENDOR_ID_TCONRAD 0x10da
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_VENDOR_ID_AMCC 0x10e8
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_VENDOR_ID_REALTEK 0x10ec
#define PCI_VENDOR_ID_XILINX 0x10ee
#define PCI_VENDOR_ID_INIT 0x1101
#define PCI_VENDOR_ID_CREATIVE 0x1102
#define PCI_VENDOR_ID_ECTIVA PCI_VENDOR_ID_CREATIVE
#define PCI_VENDOR_ID_TTI 0x1103
#define PCI_VENDOR_ID_SIGMA 0x1105
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_VENDOR_ID_SIEMENS 0x110a
#define PCI_VENDOR_ID_VORTEX 0x1119
#define PCI_VENDOR_ID_EF 0x111a
#define PCI_VENDOR_ID_IDT 0x111d
#define PCI_VENDOR_ID_FORE 0x1127
#define PCI_VENDOR_ID_PHILIPS 0x1131
#define PCI_VENDOR_ID_EICON 0x1133
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_VENDOR_ID_ZIATECH 0x1138
#define PCI_VENDOR_ID_SYSKONNECT 0x1148
#define PCI_VENDOR_ID_DIGI 0x114f
#define PCI_VENDOR_ID_XIRCOM 0x115d
#define PCI_VENDOR_ID_SERVERWORKS 0x1166
#define PCI_VENDOR_ID_ALTERA 0x1172
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_VENDOR_ID_TOSHIBA 0x1179
#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
#define PCI_VENDOR_ID_ATTO 0x117c
#define PCI_VENDOR_ID_RICOH 0x1180
#define PCI_VENDOR_ID_DLINK 0x1186
#define PCI_VENDOR_ID_ARTOP 0x1191
#define PCI_VENDOR_ID_ZEITNET 0x1193
#define PCI_VENDOR_ID_FUJITSU_ME 0x119e
#define PCI_VENDOR_ID_MARVELL 0x11ab
#define PCI_VENDOR_ID_V3 0x11b0
#define PCI_VENDOR_ID_ATT 0x11c1
#define PCI_VENDOR_ID_SPECIALIX 0x11cb
#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4
#define PCI_VENDOR_ID_ZORAN 0x11de
#define PCI_VENDOR_ID_COMPEX 0x11f6
#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
#define PCI_VENDOR_ID_CYCLADES 0x120e
#define PCI_VENDOR_ID_ESSENTIAL 0x120f
#define PCI_VENDOR_ID_O2 0x1217
#define PCI_VENDOR_ID_3DFX 0x121a
#define PCI_VENDOR_ID_QEMU 0x1234
#define PCI_VENDOR_ID_AVM 0x1244
#define PCI_VENDOR_ID_STALLION 0x124d
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_VENDOR_ID_SATSAGEM 0x1267
#define PCI_VENDOR_ID_ENSONIQ 0x1274
#define PCI_VENDOR_ID_TRANSMETA 0x1279
#define PCI_VENDOR_ID_ROCKWELL 0x127a
#define PCI_VENDOR_ID_ITE 0x1283
#define PCI_VENDOR_ID_ALTEON 0x12ae
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_VENDOR_ID_PERICOM 0x12d8
#define PCI_VENDOR_ID_AUREAL 0x12eb
#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
#define PCI_VENDOR_ID_ESDGMBH 0x12fe
#define PCI_VENDOR_ID_CB 0x1307
#define PCI_VENDOR_ID_SIIG 0x131f
#define PCI_VENDOR_ID_RADISYS 0x1331
#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
#define PCI_VENDOR_ID_DOMEX 0x134a
#define PCI_VENDOR_ID_INTASHIELD 0x135a
#define PCI_VENDOR_ID_QUATECH 0x135c
#define PCI_VENDOR_ID_SEALEVEL 0x135e
#define PCI_VENDOR_ID_HYPERCOPE 0x1365
#define PCI_VENDOR_ID_DIGIGRAM 0x1369
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_VENDOR_ID_CNET 0x1371
#define PCI_VENDOR_ID_LMC 0x1376
#define PCI_VENDOR_ID_NETGEAR 0x1385
#define PCI_VENDOR_ID_APPLICOM 0x1389
#define PCI_VENDOR_ID_MOXA 0x1393
#define PCI_VENDOR_ID_CCD 0x1397
#define PCI_VENDOR_ID_EXAR 0x13a8
#define PCI_VENDOR_ID_MICROGATE 0x13c0
#define PCI_VENDOR_ID_3WARE 0x13c1
#define PCI_VENDOR_ID_IOMEGA 0x13ca
#define PCI_VENDOR_ID_ABOCOM 0x13d1
#define PCI_VENDOR_ID_SUNDANCE 0x13f0
#define PCI_VENDOR_ID_CMEDIA 0x13f6
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_VENDOR_ID_MEILHAUS 0x1402
#define PCI_VENDOR_ID_LAVA 0x1407
#define PCI_VENDOR_ID_TIMEDIA 0x1409
#define PCI_VENDOR_ID_ICE 0x1412
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_VENDOR_ID_CHELSIO 0x1425
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d
#define PCI_VENDOR_ID_GIGABYTE 0x1458
#define PCI_VENDOR_ID_AMBIT 0x1468
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_MEDIATEK 0x14c3
#define PCI_VENDOR_ID_TITAN 0x14d2
#define PCI_VENDOR_ID_PANACOM 0x14d4
#define PCI_VENDOR_ID_SIPACKETS 0x14d9
#define PCI_VENDOR_ID_AFAVLAB 0x14db
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_VENDOR_ID_BCM_GVC 0x14a4
#define PCI_VENDOR_ID_TOPIC 0x151f
#define PCI_VENDOR_ID_MAINPINE 0x1522
#define PCI_VENDOR_ID_SYBA 0x1592
#define PCI_VENDOR_ID_MORETON 0x15aa
#define PCI_VENDOR_ID_VMWARE 0x15ad
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_VENDOR_ID_MELLANOX 0x15b3
#define PCI_VENDOR_ID_DFI 0x15bd
#define PCI_VENDOR_ID_QUICKNET 0x15e2
#define PCI_VENDOR_ID_PDC 0x15e9
#define PCI_VENDOR_ID_FARSITE 0x1619
#define PCI_VENDOR_ID_ARIMA 0x161f
#define PCI_VENDOR_ID_BROCADE 0x1657
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_VENDOR_ID_ATHEROS 0x168c
#define PCI_VENDOR_ID_NETCELL 0x169c
#define PCI_VENDOR_ID_CENATEK 0x16ca
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_VENDOR_ID_LINKSYS 0x1737
#define PCI_VENDOR_ID_ALTIMA 0x173b
#define PCI_VENDOR_ID_CAVIUM 0x177d
#define PCI_VENDOR_ID_TECHWELL 0x1797
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_VENDOR_ID_RDC 0x17f3
#define PCI_VENDOR_ID_GLI 0x17a0
#define PCI_VENDOR_ID_LENOVO 0x17aa
#define PCI_VENDOR_ID_QCOM 0x17cb
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_VENDOR_ID_S2IO 0x17d5
#define PCI_VENDOR_ID_SITECOM 0x182d
#define PCI_VENDOR_ID_TOPSPIN 0x1867
#define PCI_VENDOR_ID_COMMTECH 0x18f7
#define PCI_VENDOR_ID_SILAN 0x1904
#define PCI_VENDOR_ID_RENESAS 0x1912
#define PCI_VENDOR_ID_SOLARFLARE 0x1924
#define PCI_VENDOR_ID_TDI 0x192e
#define PCI_VENDOR_ID_FREESCALE 0x1957
#define PCI_VENDOR_ID_NXP PCI_VENDOR_ID_FREESCALE
#define PCI_VENDOR_ID_PASEMI 0x1959
#define PCI_VENDOR_ID_ATTANSIC 0x1969
#define PCI_VENDOR_ID_JMICRON 0x197b
#define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_VENDOR_ID_AMAZON 0x1d0f
#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_VENDOR_ID_SUNIX 0x1fd4
#define PCI_VENDOR_ID_HINT 0x3388
#define PCI_VENDOR_ID_3DLABS 0x3d3d
#define PCI_VENDOR_ID_NETXEN 0x4040
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_VENDOR_ID_ACCESSIO 0x494f
#define PCI_VENDOR_ID_S3 0x5333
#define PCI_VENDOR_ID_DUNORD 0x5544
#define PCI_VENDOR_ID_DCI 0x6666
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
#define PCI_VENDOR_ID_KTI 0x8e2e
#define PCI_VENDOR_ID_ADAPTEC 0x9004
#define PCI_VENDOR_ID_ADAPTEC2 0x9005
#define PCI_VENDOR_ID_HOLTEK 0x9412
#define PCI_VENDOR_ID_NETMOS 0x9710
#define PCI_VENDOR_ID_3COM_2 0xa727
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_VENDOR_ID_TIGERJET 0xe159
#define PCI_VENDOR_ID_XILINX_RME 0xea60
#define PCI_VENDOR_ID_XEN 0x5853
#define PCI_VENDOR_ID_OCZ 0x1b85
#define PCI_VENDOR_ID_NCUBE 0x10ff
#endif /* __PCI_IDS_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,121 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci.h>
#include <drivers/core/power_domain.h>
#define DBG_TAG "pci.pme"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/*
* Power Management Capability Register:
*
*  31     27 26  25  24    22 21  20  19  18  16 15        8 7              0
* +---------+---+---+--------+---+---+---+------+-----------+----------------+
* |         |   |   |        |   |   |   |      |           | Capability ID  |
* +---------+---+---+--------+---+---+---+------+-----------+----------------+
*      ^      ^   ^     ^      ^   ^   ^    ^         ^
*      |      |   |     |      |   |   |    |         |
*      |      |   |     |      |   |   |    |         +---- Next Capability Pointer
*      |      |   |     |      |   |   |    +-------------- Version
*      |      |   |     |      |   |   +------------------- PME Clock
*      |      |   |     |      |   +----------------------- Immediate Readiness on Return to D0
*      |      |   |     |      +--------------------------- Device Specific Initialization
*      |      |   |     +---------------------------------- Aux Current
*      |      |   +---------------------------------------- D1 Support
*      |      +-------------------------------------------- D2 Support
*      +--------------------------------------------------- PME Support
*/
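/*
 * Worked example (hypothetical value): a PMC of 0xfe03 decodes to
 * Version = 3, D1 Support = 1, D2 Support = 1 and PME Support = 0b11111,
 * meaning PME# can be asserted from D0, D1, D2, D3hot and D3cold.
 */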
void rt_pci_pme_init(struct rt_pci_device *pdev)
{
rt_uint16_t pmc;
if (!pdev || !(pdev->pme_cap = rt_pci_find_capability(pdev, PCIY_PMG)))
{
return;
}
rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_CAP, &pmc);
if ((pmc & PCIM_PCAP_SPEC) > 3)
{
LOG_E("%s: Unsupported PME CAP regs spec %u",
rt_dm_dev_get_name(&pdev->parent), pmc & PCIM_PCAP_SPEC);
return;
}
pmc &= PCIM_PCAP_PMEMASK;
if (pmc)
{
pdev->pme_support = RT_FIELD_GET(PCIM_PCAP_PMEMASK, pmc);
rt_pci_pme_active(pdev, RT_FALSE);
}
}
rt_err_t rt_pci_enable_wake(struct rt_pci_device *pdev,
enum rt_pci_power state, rt_bool_t enable)
{
if (!pdev || state >= RT_PCI_PME_MAX)
{
return -RT_EINVAL;
}
if (enable)
{
if (rt_pci_pme_capable(pdev, state) ||
rt_pci_pme_capable(pdev, RT_PCI_D3COLD))
{
rt_pci_pme_active(pdev, RT_TRUE);
}
}
else
{
rt_pci_pme_active(pdev, RT_FALSE);
}
return RT_EOK;
}
static void pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
{
rt_uint16_t pmcsr;
if (!pdev->pme_support)
{
return;
}
rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, &pmcsr);
/* Clear PME_Status by writing 1 to it and enable PME# */
pmcsr |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
if (!enable)
{
pmcsr &= ~PCIM_PSTAT_PMEENABLE;
}
rt_pci_write_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, pmcsr);
}
void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
{
if (!pdev)
{
return;
}
pci_pme_active(pdev, enable);
rt_dm_power_domain_attach(&pdev->parent, enable);
}

View File

@@ -0,0 +1,926 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "pci.probe"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
#include <drivers/core/bus.h>
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
rt_hw_spin_lock(&spinlock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
rt_hw_spin_unlock(&spinlock->lock);
}
struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size)
{
struct rt_pci_host_bridge *bridge = rt_calloc(1, sizeof(*bridge) + priv_size);
return bridge;
}
rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *bridge)
{
if (!bridge)
{
return -RT_EINVAL;
}
if (bridge->bus_regions)
{
rt_free(bridge->bus_regions);
}
if (bridge->dma_regions)
{
rt_free(bridge->dma_regions);
}
rt_free(bridge);
return RT_EOK;
}
rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err = RT_EOK;
if (host_bridge->parent.ofw_node)
{
err = rt_pci_ofw_host_bridge_init(host_bridge->parent.ofw_node, host_bridge);
}
return err;
}
struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus)
{
struct rt_pci_device *pdev = rt_calloc(1, sizeof(*pdev));
if (!pdev)
{
return RT_NULL;
}
rt_list_init(&pdev->list);
pdev->bus = bus;
if (bus)
{
spin_lock(&bus->lock);
rt_list_insert_before(&bus->devices_nodes, &pdev->list);
spin_unlock(&bus->lock);
}
pdev->subsystem_vendor = PCI_ANY_ID;
pdev->subsystem_device = PCI_ANY_ID;
pdev->irq = -1;
for (int i = 0; i < RT_ARRAY_SIZE(pdev->resource); ++i)
{
pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
}
#ifdef RT_PCI_MSI
rt_list_init(&pdev->msi_desc_nodes);
rt_spin_lock_init(&pdev->msi_lock);
#endif
return pdev;
}
struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn)
{
rt_err_t err;
struct rt_pci_device *pdev = RT_NULL;
rt_uint16_t vendor = PCI_ANY_ID, device = PCI_ANY_ID;
if (!bus)
{
goto _end;
}
err = rt_pci_bus_read_config_u16(bus, devfn, PCIR_VENDOR, &vendor);
rt_pci_bus_read_config_u16(bus, devfn, PCIR_DEVICE, &device);
if (vendor == (typeof(vendor))PCI_ANY_ID ||
vendor == (typeof(vendor))0x0000 || err)
{
goto _end;
}
if (!(pdev = rt_pci_alloc_device(bus)))
{
goto _end;
}
pdev->devfn = devfn;
pdev->vendor = vendor;
pdev->device = device;
rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u",
rt_pci_domain(pdev), pdev->bus->number,
RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
if (rt_pci_setup_device(pdev))
{
rt_free(pdev);
pdev = RT_NULL;
goto _end;
}
rt_pci_device_register(pdev);
_end:
return pdev;
}
static rt_bool_t pci_intx_mask_broken(struct rt_pci_device *pdev)
{
rt_bool_t res = RT_FALSE;
rt_uint16_t orig, toggle, new;
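/*
 * Toggle the INTx disable bit in the command register and read it back:
 * if the toggled value does not stick, this device cannot mask INTx
 * through the command register.
 */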
rt_pci_read_config_u16(pdev, PCIR_COMMAND, &orig);
toggle = orig ^ PCIM_CMD_INTxDIS;
rt_pci_write_config_u16(pdev, PCIR_COMMAND, toggle);
rt_pci_read_config_u16(pdev, PCIR_COMMAND, &new);
rt_pci_write_config_u16(pdev, PCIR_COMMAND, orig);
if (new != toggle)
{
res = RT_TRUE;
}
return res;
}
static void pci_read_irq(struct rt_pci_device *pdev)
{
rt_uint8_t irq = 0;
rt_pci_read_config_u8(pdev, PCIR_INTPIN, &irq);
pdev->pin = irq;
if (irq)
{
rt_pci_read_config_u8(pdev, PCIR_INTLINE, &irq);
}
pdev->irq = irq;
}
static void pcie_set_port_type(struct rt_pci_device *pdev)
{
int pos;
if (!(pos = rt_pci_find_capability(pdev, PCIY_EXPRESS)))
{
return;
}
pdev->pcie_cap = pos;
}
static void pci_configure_ari(struct rt_pci_device *pdev)
{
rt_uint32_t cap, ctl2_ari;
struct rt_pci_device *bridge;
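/*
 * ARI (Alternative Routing-ID Interpretation) forwarding is controlled in
 * the bridge above this device: enable it only when the bridge advertises
 * ARI support and the function exposes the ARI extended capability,
 * otherwise make sure it is disabled.
 */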
if (!rt_pci_is_pcie(pdev) || pdev->devfn)
{
return;
}
bridge = pdev->bus->self;
if (rt_pci_is_root_bus(pdev->bus) || !bridge)
{
return;
}
rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CAP2, &cap);
if (!(cap & PCIEM_CAP2_ARI))
{
return;
}
rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, &ctl2_ari);
if (rt_pci_find_ext_capability(pdev, PCIZ_ARI))
{
ctl2_ari |= PCIEM_CTL2_ARI;
bridge->ari_enabled = RT_TRUE;
}
else
{
ctl2_ari &= ~PCIEM_CTL2_ARI;
bridge->ari_enabled = RT_FALSE;
}
rt_pci_write_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, ctl2_ari);
}
static rt_uint16_t pci_cfg_space_size_ext(struct rt_pci_device *pdev)
{
rt_uint32_t status;
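/*
 * Probe the first dword beyond the 256-byte conventional space: if the
 * read fails, only the conventional space is accessible; otherwise assume
 * the 4KiB extended configuration space.
 */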
if (rt_pci_read_config_u32(pdev, PCI_REGMAX + 1, &status))
{
return PCI_REGMAX + 1;
}
return PCIE_REGMAX + 1;
}
static rt_uint16_t pci_cfg_space_size(struct rt_pci_device *pdev)
{
int pos;
rt_uint32_t status;
rt_uint16_t class = pdev->class >> 8;
if (class == PCIS_BRIDGE_HOST)
{
return pci_cfg_space_size_ext(pdev);
}
if (rt_pci_is_pcie(pdev))
{
return pci_cfg_space_size_ext(pdev);
}
pos = rt_pci_find_capability(pdev, PCIY_PCIX);
if (!pos)
{
return PCI_REGMAX + 1;
}
rt_pci_read_config_u32(pdev, pos + PCIXR_STATUS, &status);
if (status & (PCIXM_STATUS_266CAP | PCIXM_STATUS_533CAP))
{
return pci_cfg_space_size_ext(pdev);
}
return PCI_REGMAX + 1;
}
static void pci_init_capabilities(struct rt_pci_device *pdev)
{
rt_pci_pme_init(pdev);
#ifdef RT_PCI_MSI
rt_pci_msi_init(pdev); /* Disable MSI */
rt_pci_msix_init(pdev); /* Disable MSI-X */
#endif
pcie_set_port_type(pdev);
pdev->cfg_size = pci_cfg_space_size(pdev);
pci_configure_ari(pdev);
pdev->no_msi = RT_FALSE;
pdev->msi_enabled = RT_FALSE;
pdev->msix_enabled = RT_FALSE;
}
rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev)
{
rt_uint8_t pos;
rt_uint32_t class = 0;
struct rt_pci_host_bridge *host_bridge;
if (!pdev)
{
return -RT_EINVAL;
}
if (!(host_bridge = rt_pci_find_host_bridge(pdev->bus)))
{
return -RT_EINVAL;
}
rt_pci_ofw_device_init(pdev);
rt_pci_read_config_u32(pdev, PCIR_REVID, &class);
pdev->revision = class & 0xff;
pdev->class = class >> 8; /* Upper 3 bytes */
rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &pdev->hdr_type);
/* Clear errors left from system firmware */
rt_pci_write_config_u16(pdev, PCIR_STATUS, 0xffff);
if (pdev->hdr_type & 0x80)
{
pdev->multi_function = RT_TRUE;
}
pdev->hdr_type &= PCIM_HDRTYPE;
if (pci_intx_mask_broken(pdev))
{
pdev->broken_intx_masking = RT_TRUE;
}
rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u", rt_pci_domain(pdev),
pdev->bus->number, RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
switch (pdev->hdr_type)
{
case PCIM_HDRTYPE_NORMAL:
if (class == PCIS_BRIDGE_PCI)
{
goto error;
}
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
rt_pci_read_config_u16(pdev, PCIR_SUBVEND_0, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEV_0, &pdev->subsystem_device);
break;
case PCIM_HDRTYPE_BRIDGE:
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
pos = rt_pci_find_capability(pdev, PCIY_SUBVENDOR);
if (pos)
{
rt_pci_read_config_u16(pdev, PCIR_SUBVENDCAP, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEVCAP, &pdev->subsystem_device);
}
break;
case PCIM_HDRTYPE_CARDBUS:
if (class != PCIS_BRIDGE_CARDBUS)
{
goto error;
}
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
rt_pci_read_config_u16(pdev, PCIR_SUBVEND_2, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEV_2, &pdev->subsystem_device);
break;
default:
LOG_E("Ignoring device unknown header type %02x", pdev->hdr_type);
return -RT_EIO;
error:
LOG_E("Ignoring class %08x (doesn't match header type %02x)", pdev->class, pdev->hdr_type);
pdev->class = PCIC_NOT_DEFINED << 8;
}
pci_init_capabilities(pdev);
if (rt_pci_is_pcie(pdev))
{
rt_pci_read_config_u16(pdev, pdev->pcie_cap + PCIER_FLAGS, &pdev->exp_flags);
}
return RT_EOK;
}
static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent);
static rt_err_t pci_child_bus_init(struct rt_pci_bus *bus, rt_uint32_t bus_no,
struct rt_pci_host_bridge *host_bridge, struct rt_pci_device *pdev)
{
rt_err_t err;
struct rt_pci_bus *parent_bus = bus->parent;
bus->sysdata = parent_bus->sysdata;
bus->self = pdev;
bus->ops = host_bridge->child_ops ? : parent_bus->ops;
bus->number = bus_no;
rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus_no);
rt_pci_ofw_bus_init(bus);
if (bus->ops->add)
{
if ((err = bus->ops->add(bus)))
{
rt_pci_ofw_bus_free(bus);
LOG_E("PCI-Bus<%s> add bus failed with err = %s",
bus->name, rt_strerror(err));
return err;
}
}
return RT_EOK;
}
static rt_bool_t pci_ea_fixed_busnrs(struct rt_pci_device *pdev,
rt_uint8_t *sec, rt_uint8_t *sub)
{
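/*
 * The Enhanced Allocation (EA) capability may carry fixed secondary and
 * subordinate bus numbers for a bridge; when present and sane they
 * override the numbers chosen while scanning.
 */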
int pos, offset;
rt_uint32_t dw;
rt_uint8_t ea_sec, ea_sub;
pos = rt_pci_find_capability(pdev, PCIY_EA);
if (!pos)
{
return RT_FALSE;
}
offset = pos + PCIR_EA_FIRST_ENT;
rt_pci_read_config_u32(pdev, offset, &dw);
ea_sec = PCIM_EA_SEC_NR(dw);
ea_sub = PCIM_EA_SUB_NR(dw);
if (ea_sec == 0 || ea_sub < ea_sec)
{
return RT_FALSE;
}
*sec = ea_sec;
*sub = ea_sub;
return RT_TRUE;
}
static void pcie_fixup_link(struct rt_pci_device *pdev)
{
int pos = pdev->pcie_cap;
rt_uint16_t exp_lnkctl, exp_lnkctl2, exp_lnksta;
rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;
if ((pdev->exp_flags & PCIEM_FLAGS_VERSION) < 2)
{
return;
}
if (exp_type != PCIEM_TYPE_ROOT_PORT &&
exp_type != PCIEM_TYPE_DOWNSTREAM_PORT &&
exp_type != PCIEM_TYPE_PCIE_BRIDGE)
{
return;
}
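/*
 * Force the target link speed to 2.5GT/s, retrain the link, then poll
 * DL_Active for up to 200ms; if the link does not come up, restore the
 * original target speed and retrain once more.
 */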
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL, &exp_lnkctl);
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL2, &exp_lnkctl2);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2,
(exp_lnkctl2 & ~PCIEM_LNKCTL2_TLS) | PCIEM_LNKCTL2_TLS_2_5GT);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);
for (int i = 0; i < 20; ++i)
{
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_STA, &exp_lnksta);
if (!!(exp_lnksta & PCIEM_LINK_STA_DL_ACTIVE))
{
goto _status_sync;
}
rt_thread_mdelay(10);
}
/* Fail, restore */
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2, exp_lnkctl2);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);
_status_sync:
/* Wait a while for success or failure */
rt_thread_mdelay(100);
}
static rt_uint32_t pci_scan_bridge_extend(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
rt_uint32_t bus_no_start, rt_uint32_t buses, rt_bool_t reconfigured)
{
rt_bool_t fixed_buses;
rt_uint8_t fixed_sub, fixed_sec;
rt_uint8_t primary, secondary, subordinate;
rt_uint32_t value, bus_no = bus_no_start;
struct rt_pci_bus *next_bus;
struct rt_pci_host_bridge *host_bridge;
/* CardBus initialization is not supported; it is mostly used on PC-class platforms. */
if (pdev->hdr_type == PCIM_HDRTYPE_CARDBUS)
{
LOG_E("CardBus is not supported in system");
goto _end;
}
rt_pci_read_config_u32(pdev, PCIR_PRIBUS_1, &value);
primary = value & 0xff;
secondary = (value >> 8) & 0xff;
subordinate = (value >> 16) & 0xff;
if (primary == bus->number && bus->number > secondary && secondary > subordinate)
{
if (!reconfigured)
{
goto _end;
}
LOG_I("Bridge configuration: primary(%02x) secondary(%02x) subordinate(%02x)",
primary, secondary, subordinate);
}
if (pdev->pcie_cap)
{
pcie_fixup_link(pdev);
}
++bus_no;
/* Count of subordinate */
buses -= !!buses;
host_bridge = rt_pci_find_host_bridge(bus);
RT_ASSERT(host_bridge != RT_NULL);
/* Clear errors */
rt_pci_write_config_u16(pdev, PCIR_STATUS, RT_UINT16_MAX);
fixed_buses = pci_ea_fixed_busnrs(pdev, &fixed_sec, &fixed_sub);
/* Clear bus info */
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value & ~0xffffff);
if (!(next_bus = pci_alloc_bus(bus)))
{
LOG_E("Alloc bus(%02x) fail", bus_no);
goto _end;
}
if (pci_child_bus_init(next_bus, bus_no, host_bridge, pdev))
{
goto _end;
}
/* Fill primary, secondary */
value = (buses & 0xff000000) | (bus->number << 0) | (next_bus->number << 8);
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
bus_no = rt_pci_scan_child_buses(next_bus, buses);
/* Fill subordinate */
value |= (next_bus->number + rt_list_len(&next_bus->children_nodes)) << 16;
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
if (fixed_buses)
{
bus_no = fixed_sub;
}
rt_pci_write_config_u8(pdev, PCIR_SUBBUS_1, bus_no);
_end:
return bus_no;
}
rt_uint32_t rt_pci_scan_bridge(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
rt_uint32_t bus_no_start, rt_bool_t reconfigured)
{
if (!bus || !pdev)
{
return RT_UINT32_MAX;
}
return pci_scan_bridge_extend(bus, pdev, bus_no_start, 0, reconfigured);
}
rt_inline rt_bool_t only_one_child(struct rt_pci_bus *bus)
{
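/*
 * PCIe root ports, downstream ports and PCIe-to-PCI bridges have exactly
 * one device (device 0) below them, so scanning the other slots on such
 * a bus is unnecessary.
 */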
struct rt_pci_device *pdev;
if (rt_pci_is_root_bus(bus))
{
return RT_FALSE;
}
pdev = bus->self;
if (rt_pci_is_pcie(pdev))
{
rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;
if (exp_type == PCIEM_TYPE_ROOT_PORT ||
exp_type == PCIEM_TYPE_DOWNSTREAM_PORT ||
exp_type == PCIEM_TYPE_PCIE_BRIDGE)
{
return RT_TRUE;
}
}
return RT_FALSE;
}
static int next_fn(struct rt_pci_bus *bus, struct rt_pci_device *pdev, int fn)
{
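/*
 * With ARI enabled on the upstream bridge, the next function number comes
 * from the device's ARI capability; otherwise iterate functions 0..7,
 * but only when the device reports itself as multi-function.
 */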
if (!rt_pci_is_root_bus(bus) && bus->self->ari_enabled)
{
int pos, next_fn;
rt_uint16_t cap = 0;
if (!pdev)
{
return -RT_EINVAL;
}
pos = rt_pci_find_ext_capability(pdev, PCIZ_ARI);
if (!pos)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pos + PCIR_ARI_CAP, &cap);
next_fn = PCIM_ARI_CAP_NFN(cap);
if (next_fn <= fn)
{
return -RT_EINVAL;
}
return next_fn;
}
if (fn >= RT_PCI_FUNCTION_MAX - 1)
{
return -RT_EINVAL;
}
if (pdev && !pdev->multi_function)
{
return -RT_EINVAL;
}
return fn + 1;
}
rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn)
{
rt_size_t nr = 0;
struct rt_pci_device *pdev = RT_NULL;
if (!bus)
{
return nr;
}
if (devfn > 0 && only_one_child(bus))
{
return nr;
}
for (int func = 0; func >= 0; func = next_fn(bus, pdev, func))
{
pdev = rt_pci_scan_single_device(bus, devfn + func);
if (pdev)
{
++nr;
if (func > 0)
{
pdev->multi_function = RT_TRUE;
}
}
else if (func == 0)
{
break;
}
}
return nr;
}
rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses)
{
rt_uint32_t bus_no;
struct rt_pci_device *pdev = RT_NULL;
if (!bus)
{
bus_no = RT_UINT32_MAX;
goto _end;
}
bus_no = bus->number;
for (rt_uint32_t devfn = 0;
devfn < RT_PCI_DEVFN(RT_PCI_DEVICE_MAX - 1, RT_PCI_FUNCTION_MAX - 1);
devfn += RT_PCI_FUNCTION_MAX)
{
rt_pci_scan_slot(bus, devfn);
}
rt_pci_foreach_bridge(pdev, bus)
{
int offset;
bus_no = pci_scan_bridge_extend(bus, pdev, bus_no, buses, RT_TRUE);
offset = bus_no - bus->number;
if (buses > offset)
{
buses -= offset;
}
else
{
break;
}
}
_end:
return bus_no;
}
rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus)
{
return rt_pci_scan_child_buses(bus, 0);
}
static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent)
{
struct rt_pci_bus *bus = rt_calloc(1, sizeof(*bus));
if (!bus)
{
return RT_NULL;
}
bus->parent = parent;
rt_list_init(&bus->list);
rt_list_init(&bus->children_nodes);
rt_list_init(&bus->devices_nodes);
rt_spin_lock_init(&bus->lock);
return bus;
}
rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge)
{
struct rt_pci_bus *bus = pci_alloc_bus(RT_NULL);
if (!bus)
{
return -RT_ENOMEM;
}
host_bridge->root_bus = bus;
bus->sysdata = host_bridge->sysdata;
bus->host_bridge = host_bridge;
bus->ops = host_bridge->ops;
bus->number = host_bridge->bus_range[0];
rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus->number);
if (bus->ops->add)
{
rt_err_t err = bus->ops->add(bus);
if (err)
{
LOG_E("PCI-Bus<%s> add bus failed with err = %s", bus->name, rt_strerror(err));
}
}
return RT_EOK;
}
rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
if ((err = rt_pci_host_bridge_register(host_bridge)))
{
return err;
}
rt_pci_scan_child_bus(host_bridge->root_bus);
return err;
}
rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
err = rt_pci_scan_root_bus_bridge(host_bridge);
return err;
}
static rt_bool_t pci_remove_bus_device(struct rt_pci_device *pdev, void *data)
{
/* Bus will free if this is the last device */
rt_bus_remove_device(&pdev->parent);
/* To find all devices, always return false */
return RT_FALSE;
}
rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err = RT_EOK;
if (host_bridge && host_bridge->root_bus)
{
rt_pci_enum_device(host_bridge->root_bus, pci_remove_bus_device, RT_NULL);
host_bridge->root_bus = RT_NULL;
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
if (bus)
{
spin_lock(&bus->lock);
if (rt_list_isempty(&bus->children_nodes) &&
rt_list_isempty(&bus->devices_nodes))
{
rt_list_remove(&bus->list);
spin_unlock(&bus->lock);
if (bus->ops->remove)
{
bus->ops->remove(bus);
}
rt_pci_ofw_bus_free(bus);
rt_free(bus);
}
else
{
spin_unlock(&bus->lock);
err = -RT_EBUSY;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev)
{
rt_err_t err = RT_EOK;
if (pdev)
{
struct rt_pci_bus *bus = pdev->bus;
spin_lock(&bus->lock);
while (pdev->parent.ref_count > 1)
{
spin_unlock(&bus->lock);
rt_thread_yield();
spin_lock(&bus->lock);
}
rt_list_remove(&pdev->list);
spin_unlock(&bus->lock);
rt_free(pdev);
}
else
{
err = -RT_EINVAL;
}
return err;
}