rtt update

2025-01-18 13:25:25 +08:00
parent c6a7554b51
commit d6009a0773
726 changed files with 103376 additions and 6270 deletions

View File

@@ -21,8 +21,21 @@ rsource "touch/Kconfig"
rsource "graphic/Kconfig"
rsource "hwcrypto/Kconfig"
rsource "wlan/Kconfig"
rsource "led/Kconfig"
rsource "mailbox/Kconfig"
rsource "phye/Kconfig"
rsource "ata/Kconfig"
rsource "nvme/Kconfig"
rsource "block/Kconfig"
rsource "scsi/Kconfig"
rsource "regulator/Kconfig"
rsource "reset/Kconfig"
rsource "thermal/Kconfig"
rsource "virtio/Kconfig"
rsource "dma/Kconfig"
rsource "mfd/Kconfig"
rsource "ofw/Kconfig"
rsource "pci/Kconfig"
rsource "pic/Kconfig"
rsource "pin/Kconfig"
rsource "pinctrl/Kconfig"

View File

@@ -0,0 +1,22 @@
menuconfig RT_USING_ATA
bool "Using Advanced Technology Attachment (ATA) device drivers"
depends on RT_USING_DM
depends on RT_USING_BLK
depends on RT_USING_DMA
default n
config RT_ATA_AHCI
bool "Advanced Host Controller Interface (AHCI)"
depends on RT_USING_ATA
depends on RT_USING_SCSI
default y
config RT_ATA_AHCI_PCI
bool "AHCI support on PCI bus"
depends on RT_ATA_AHCI
depends on RT_USING_PCI
default n
if RT_USING_ATA
osource "$(SOC_DM_ATA_DIR)/Kconfig"
endif
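
For reference, enabling these symbols through menuconfig typically lands in the generated rtconfig.h as plain macros, which is what the driver sources and SConscripts below test via GetDepend()/#ifdef. Illustrative output only; the exact set depends on which dependencies (RT_USING_DM, RT_USING_BLK, RT_USING_DMA, RT_USING_SCSI, RT_USING_PCI) are selected:

/* rtconfig.h (generated) -- illustrative */
#define RT_USING_ATA
#define RT_ATA_AHCI
#define RT_ATA_AHCI_PCI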

View File

@@ -0,0 +1,21 @@
from building import *
group = []
if not GetDepend(['RT_USING_ATA']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../include']
src = []
if GetDepend(['RT_ATA_AHCI']):
src += ['ahci.c']
if GetDepend(['RT_ATA_AHCI_PCI']):
src += ['ahci-pci.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,206 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#define AHCI_REG_BAR 5
struct pci_ahci_quirk
{
int bar_idx;
rt_bool_t bar_offset;
const struct rt_ahci_ops *ops;
};
struct pci_ahci_host
{
struct rt_ahci_host parent;
const struct pci_ahci_quirk *quirk;
rt_bool_t is_msi;
};
#define raw_to_pci_ahci_host(raw) rt_container_of(raw, struct pci_ahci_host, parent)
static rt_err_t pci_ahci_init(struct rt_ahci_host *host)
{
struct rt_pci_device *pdev;
pdev = rt_container_of(host->parent.dev, struct rt_pci_device, parent);
if (pdev->vendor == PCI_VENDOR_ID_JMICRON)
{
rt_pci_write_config_u8(pdev, 0x41, 0xa1);
}
return RT_EOK;
}
static const struct rt_ahci_ops pci_ahci_ops =
{
.host_init = pci_ahci_init,
};
static rt_err_t pci_ahci_intel_init(struct rt_ahci_host *host)
{
rt_uint16_t val;
struct rt_pci_device *pdev;
pdev = rt_container_of(host->parent.dev, struct rt_pci_device, parent);
rt_pci_read_config_u16(pdev, 0x92, &val);
rt_pci_write_config_u16(pdev, 0x92, val & ~0xf);
rt_thread_mdelay(10);
rt_pci_write_config_u16(pdev, 0x92, val | 0xf);
return RT_EOK;
}
static const struct rt_ahci_ops pci_ahci_intel_ops =
{
.host_init = pci_ahci_intel_init,
};
static rt_err_t pci_ahci_probe(struct rt_pci_device *pdev)
{
rt_err_t err;
int bar_idx;
struct rt_ahci_host *ahci;
struct pci_ahci_host *pci_ahci = rt_calloc(1, sizeof(*pci_ahci));
const struct pci_ahci_quirk *quirk = pdev->id->data;
if (!pci_ahci)
{
return -RT_ENOMEM;
}
pci_ahci->quirk = quirk;
ahci = &pci_ahci->parent;
ahci->parent.dev = &pdev->parent;
bar_idx = quirk && quirk->bar_offset ? quirk->bar_idx : AHCI_REG_BAR;
ahci->regs = rt_pci_iomap(pdev, bar_idx);
if (!ahci->regs)
{
err = -RT_EIO;
goto _fail;
}
ahci->ops = quirk && quirk->ops ? quirk->ops : &pci_ahci_ops;
if (rt_pci_msi_enable(pdev) > 0)
{
pci_ahci->is_msi = RT_TRUE;
}
else
{
rt_pci_irq_unmask(pdev);
}
ahci->irq = pdev->irq;
rt_pci_set_master(pdev);
if ((err = rt_ahci_host_register(ahci)))
{
goto _disable;
}
pdev->parent.user_data = pci_ahci;
return RT_EOK;
_disable:
if (pci_ahci->is_msi)
{
rt_pci_msix_disable(pdev);
}
else
{
rt_pci_irq_mask(pdev);
}
rt_pci_clear_master(pdev);
rt_iounmap(ahci->regs);
_fail:
rt_free(pci_ahci);
return err;
}
static rt_err_t pci_ahci_remove(struct rt_pci_device *pdev)
{
struct rt_ahci_host *ahci;
struct pci_ahci_host *pci_ahci = pdev->parent.user_data;
ahci = &pci_ahci->parent;
rt_ahci_host_unregister(ahci);
if (pci_ahci->is_msi)
{
rt_pci_msi_disable(pdev);
}
else
{
/* INTx is shared, don't mask all */
rt_hw_interrupt_umask(pdev->irq);
rt_pci_irq_mask(pdev);
}
rt_pci_clear_master(pdev);
rt_iounmap(ahci->regs);
rt_free(pci_ahci);
return RT_EOK;
}
static rt_err_t pci_ahci_shutdown(struct rt_pci_device *pdev)
{
return pci_ahci_remove(pdev);
}
static struct pci_ahci_quirk intel_quirk =
{
.ops = &pci_ahci_intel_ops,
};
static struct pci_ahci_quirk cavium_sata_quirk =
{
.bar_idx = 0,
.bar_offset = RT_TRUE,
};
static const struct rt_pci_device_id pci_ahci_ids[] =
{
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_INTEL, 0x2922), .data = &intel_quirk },
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_ASMEDIA, 0x0611) },
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_MARVELL, 0x6121) },
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_MARVELL, 0x6145) },
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_CAVIUM, 0xa01c), .data = &cavium_sata_quirk },
{ RT_PCI_DEVICE_CLASS(PCIS_STORAGE_SATA_AHCI, ~0) },
{ /* sentinel */ }
};
static struct rt_pci_driver pci_ahci_driver =
{
.name = "ahci-pci",
.ids = pci_ahci_ids,
.probe = pci_ahci_probe,
.remove = pci_ahci_remove,
.shutdown = pci_ahci_shutdown,
};
RT_PCI_DRIVER_EXPORT(pci_ahci_driver);
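
The quirk table above is the extension point for per-controller fixups: an entry can override the register BAR and/or supply custom rt_ahci_ops. Below is a minimal sketch of adding another fixup; the vendor/device IDs and the host_init body are hypothetical, only the structures and the ID-table mechanism come from this file.

static rt_err_t pci_ahci_example_init(struct rt_ahci_host *host)
{
    /* controller-specific setup would go here */
    return RT_EOK;
}

static const struct rt_ahci_ops pci_ahci_example_ops =
{
    .host_init = pci_ahci_example_init,
};

static struct pci_ahci_quirk example_quirk =
{
    .bar_idx = 0,               /* use BAR0 instead of the default AHCI_REG_BAR (5) */
    .bar_offset = RT_TRUE,
    .ops = &pci_ahci_example_ops,
};

/* ...and append before the sentinel of pci_ahci_ids[]:
 *     { RT_PCI_DEVICE_ID(0x1234, 0x5678), .data = &example_quirk },
 */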

View File

@@ -0,0 +1,896 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "rtdm.ahci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#define HWREG32_FLUSH(base, value) \
do { \
rt_uint32_t __value = value; \
HWREG32(base) = __value; \
__value = HWREG32(base); \
} while (0)
static void ahci_fill_cmd_slot(struct rt_ahci_port *port, rt_uint32_t opts)
{
rt_ubase_t dma_addr = port->cmd_tbl_dma;
struct rt_ahci_cmd_hdr *cmd_slot = port->cmd_slot;
cmd_slot->opts = rt_cpu_to_le32(opts);
cmd_slot->status = 0;
cmd_slot->tbl_addr_lo = rt_cpu_to_le32(rt_lower_32_bits(dma_addr));
cmd_slot->tbl_addr_hi = rt_cpu_to_le32(rt_upper_32_bits(dma_addr));
}
static int ahci_fill_sg(struct rt_ahci_host *host, int id,
void *buffer, rt_size_t buffer_size)
{
int sg_count;
rt_ubase_t dma_addr;
struct rt_ahci_port *port = &host->ports[id];
struct rt_ahci_sg *ahci_sg = port->cmd_tbl_sg;
sg_count = ((buffer_size - 1) / RT_ACHI_PRDT_BYTES_MAX) + 1;
if (sg_count > RT_AHCI_MAX_SG)
{
return -1;
}
dma_addr = (rt_ubase_t)rt_kmem_v2p(buffer);
for (int i = 0; i < sg_count; ++i, ++ahci_sg)
{
ahci_sg->addr_lo = rt_cpu_to_le32(rt_lower_32_bits(dma_addr));
ahci_sg->addr_hi = rt_cpu_to_le32(rt_upper_32_bits(dma_addr));
if (ahci_sg->addr_hi && !(host->cap & RT_AHCI_CAP_64))
{
return -1;
}
ahci_sg->flags_size = rt_cpu_to_le32(0x3fffff &
(rt_min_t(rt_uint32_t, buffer_size, RT_ACHI_PRDT_BYTES_MAX) - 1));
dma_addr += RT_ACHI_PRDT_BYTES_MAX;
buffer_size -= RT_ACHI_PRDT_BYTES_MAX;
}
return sg_count;
}
static rt_err_t ahci_request_io(struct rt_ahci_host *host, int id,
void *fis, rt_size_t fis_size,
void *buffer, rt_size_t buffer_size, rt_bool_t is_read)
{
int sg_count;
rt_err_t err;
struct rt_ahci_port *port = &host->ports[id];
if ((HWREG32(port->regs + RT_AHCI_PORT_SSTS) & 0xf) != RT_AHCI_PORT_SSTS_DET_PHYRDY)
{
return -RT_EIO;
}
if ((sg_count = ahci_fill_sg(host, id, buffer, buffer_size)) <= 0)
{
return -RT_EINVAL;
}
rt_memcpy(port->cmd_tbl, fis, fis_size);
ahci_fill_cmd_slot(port, (fis_size >> 2) | (sg_count << 16) | (!is_read << 6));
if (!is_read)
{
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, buffer, buffer_size);
}
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CI, 1);
err = rt_completion_wait(&port->done, rt_tick_from_millisecond(10000));
if (!err && is_read)
{
rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, buffer, buffer_size);
}
return err;
}
static rt_err_t ahci_scsi_cmd_rw(struct rt_ahci_host *host, int id,
rt_off_t lba, void *buffer, rt_ssize_t size, rt_bool_t is_read)
{
rt_err_t err;
rt_uint8_t fis[20];
struct rt_ahci_port *port = &host->ports[id];
rt_memset(fis, 0, sizeof(fis));
fis[0] = RT_AHCI_FIS_TYPE_REG_H2D;
fis[1] = 1 << 7; /* Command */
fis[2] = is_read ? RT_AHCI_ATA_CMD_READ_EXT : RT_AHCI_ATA_CMD_WRITE_EXT;
while (size > 0)
{
rt_size_t t_size, t_lba;
t_lba = rt_min_t(rt_size_t, host->max_blocks, size);
t_size = port->block_size * t_lba;
fis[3] = 0xe0; /* Features */
fis[4] = (lba >> 0) & 0xff; /* LBA low register */
fis[5] = (lba >> 8) & 0xff; /* LBA mid register */
fis[6] = (lba >> 16) & 0xff; /* LBA high register */
fis[7] = 1 << 6; /* Device */
fis[8] = ((lba >> 24) & 0xff); /* LBA register, 31:24 */
fis[9] = ((lba >> 32) & 0xff); /* LBA register, 39:32 */
fis[10] = ((lba >> 40) & 0xff); /* LBA register, 47:40 */
fis[12] = (t_lba >> 0) & 0xff; /* Count register, 7:0 */
fis[13] = (t_lba >> 8) & 0xff; /* Count register, 15:8 */
if ((err = ahci_request_io(host, id, fis, sizeof(fis), buffer, t_size, is_read)))
{
return err;
}
size -= t_lba;
lba += t_lba;
buffer += t_size;
}
return RT_EOK;
}
static rt_err_t ahci_scsi_synchronize_cache(struct rt_ahci_host *host, int id,
rt_off_t lba, rt_size_t size)
{
rt_uint8_t fis[20];
rt_uint16_t *ataid;
struct rt_ahci_port *port = &host->ports[id];
ataid = port->ataid;
if (!rt_ahci_ata_id_wcache_enabled(ataid) &&
!rt_ahci_ata_id_has_flush(ataid) &&
!rt_ahci_ata_id_has_flush_ext(ataid))
{
return -RT_ENOSYS;
}
rt_memset(fis, 0, sizeof(fis));
fis[0] = RT_AHCI_FIS_TYPE_REG_H2D;
fis[1] = 1 << 7; /* Command */
if (rt_ahci_ata_id_has_flush_ext(ataid))
{
fis[2] = RT_AHCI_ATA_CMD_FLUSH_EXT;
}
else
{
fis[2] = RT_AHCI_ATA_CMD_FLUSH;
}
rt_memcpy(port->cmd_tbl, fis, 20);
ahci_fill_cmd_slot(port, 5);
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CI, 1);
return rt_completion_wait(&port->done, rt_tick_from_millisecond(5000));
}
static rt_err_t ahci_scsi_cmd_write_same(struct rt_ahci_host *host, int id,
rt_off_t lba, rt_size_t size)
{
rt_uint8_t fis[20];
struct rt_ahci_port *port = &host->ports[id];
rt_memset(fis, 0, sizeof(fis));
fis[0] = RT_AHCI_FIS_TYPE_REG_H2D;
fis[1] = 1 << 7; /* Command */
fis[2] = RT_AHCI_ATA_CMD_DSM;
fis[3] = RT_AHCI_ATA_DSM_TRIM; /* Features */
fis[4] = (lba >> 0) & 0xff; /* LBA low register */
fis[5] = (lba >> 8) & 0xff; /* LBA mid register */
fis[6] = (lba >> 16) & 0xff; /* LBA high register */
fis[7] = 1 << 6; /* Device */
fis[8] = ((lba >> 24) & 0xff); /* LBA register, 31:24 */
fis[9] = ((lba >> 32) & 0xff); /* LBA register, 39:32 */
fis[10] = ((lba >> 40) & 0xff); /* LBA register, 47:40 */
fis[12] = (size >> 0) & 0xff; /* Count register, 7:0 */
fis[13] = (size >> 8) & 0xff; /* Count register, 15:8 */
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CI, 1);
return rt_completion_wait(&port->done, rt_tick_from_millisecond(5000));
}
static rt_err_t ahci_scsi_cmd_read_capacity(struct rt_ahci_host *host, int id,
rt_size_t *out_last_block, rt_size_t *out_block_size)
{
struct rt_ahci_port *port = &host->ports[id];
if (!port->ataid)
{
return -RT_EIO;
}
*out_last_block = rt_ahci_ata_id_n_sectors(port->ataid) - 1;
*out_block_size = port->block_size;
return RT_EOK;
}
static rt_err_t ahci_scsi_cmd_test_unit_ready(struct rt_ahci_host *host, int id)
{
struct rt_ahci_port *port = &host->ports[id];
return port->ataid ? RT_EOK : -RT_EIO;
}
static rt_err_t ahci_scsi_cmd_inquiry(struct rt_ahci_host *host, int id,
char *prodid, rt_size_t prodid_len, char *prodrev, rt_size_t prodrev_len)
{
rt_err_t err;
rt_uint8_t fis[20];
rt_uint16_t *ataid;
struct rt_ahci_port *port = &host->ports[id];
if (!port->link)
{
return -RT_EIO;
}
if (!port->ataid && !(port->ataid = rt_malloc(RT_AHCI_ATA_ID_WORDS * 2)))
{
return -RT_ENOMEM;
}
ataid = port->ataid;
rt_memset(fis, 0, sizeof(fis));
fis[0] = RT_AHCI_FIS_TYPE_REG_H2D;
fis[1] = 1 << 7; /* Command */
fis[2] = RT_AHCI_ATA_CMD_ID_ATA;
if ((err = ahci_request_io(host, id, fis, sizeof(fis),
ataid, RT_AHCI_ATA_ID_WORDS * 2, RT_TRUE)))
{
return err;
}
for (int i = 0; i < RT_AHCI_ATA_ID_WORDS; ++i)
{
ataid[i] = rt_le16_to_cpu(ataid[i]);
}
for (int i = 0; i < prodid_len / 2; ++i)
{
rt_uint16_t src = ataid[RT_AHCI_ATA_ID_PROD + i];
prodid[i] = (src & 0x00ff) << 8 | (src & 0xff00) >> 8;
}
for (int i = 0; i < prodrev_len / 2; ++i)
{
rt_uint16_t src = ataid[RT_AHCI_ATA_ID_FW_REV + i];
prodrev[i] = (src & 0x00ff) << 8 | (src & 0xff00) >> 8;
}
return err;
}
static rt_err_t ahci_scsi_transfer(struct rt_scsi_device *sdev,
struct rt_scsi_cmd *cmd)
{
rt_err_t err;
struct rt_ahci_host *host;
host = rt_container_of(sdev->host, struct rt_ahci_host, parent);
switch (cmd->op.unknow.opcode)
{
case RT_SCSI_CMD_REQUEST_SENSE:
{
struct rt_scsi_request_sense_data *request_sense = &cmd->data.request_sense;
request_sense->error_code = 0x72;
err = RT_EOK;
}
break;
case RT_SCSI_CMD_READ10:
{
struct rt_scsi_read10 *read10 = &cmd->op.read10;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be32_to_cpu(read10->lba),
cmd->data.ptr,
rt_be16_to_cpu(read10->size),
RT_TRUE);
}
break;
case RT_SCSI_CMD_READ16:
{
struct rt_scsi_read16 *read16 = &cmd->op.read16;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be64_to_cpu(read16->lba),
cmd->data.ptr,
rt_be32_to_cpu(read16->size),
RT_TRUE);
}
break;
case RT_SCSI_CMD_READ12:
{
struct rt_scsi_read12 *read12 = &cmd->op.read12;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be32_to_cpu(read12->lba),
cmd->data.ptr,
rt_be32_to_cpu(read12->size),
RT_TRUE);
}
break;
case RT_SCSI_CMD_WRITE10:
{
struct rt_scsi_write10 *write10 = &cmd->op.write10;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be32_to_cpu(write10->lba),
cmd->data.ptr,
rt_be16_to_cpu(write10->size),
RT_FALSE);
}
break;
case RT_SCSI_CMD_WRITE16:
{
struct rt_scsi_write16 *write16 = &cmd->op.write16;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be64_to_cpu(write16->lba),
cmd->data.ptr,
rt_be32_to_cpu(write16->size),
RT_FALSE);
}
break;
case RT_SCSI_CMD_WRITE12:
{
struct rt_scsi_write12 *write12 = &cmd->op.write12;
err = ahci_scsi_cmd_rw(host, sdev->id,
rt_be32_to_cpu(write12->lba),
cmd->data.ptr,
rt_be32_to_cpu(write12->size),
RT_FALSE);
}
break;
case RT_SCSI_CMD_SYNCHRONIZE_CACHE10:
{
struct rt_scsi_synchronize_cache10 *synchronize_cache10 = &cmd->op.synchronize_cache10;
err = ahci_scsi_synchronize_cache(host, sdev->id,
rt_be32_to_cpu(synchronize_cache10->lba),
rt_be16_to_cpu(synchronize_cache10->size));
}
break;
case RT_SCSI_CMD_SYNCHRONIZE_CACHE16:
{
struct rt_scsi_synchronize_cache16 *synchronize_cache16 = &cmd->op.synchronize_cache16;
err = ahci_scsi_synchronize_cache(host, sdev->id,
rt_be64_to_cpu(synchronize_cache16->lba),
rt_be32_to_cpu(synchronize_cache16->size));
}
break;
case RT_SCSI_CMD_WRITE_SAME10:
{
struct rt_scsi_write_same10 *write_same10 = &cmd->op.write_same10;
err = ahci_scsi_cmd_write_same(host, sdev->id,
rt_be32_to_cpu(write_same10->lba), rt_be16_to_cpu(write_same10->size));
}
break;
case RT_SCSI_CMD_WRITE_SAME16:
{
struct rt_scsi_write_same16 *write_same16 = &cmd->op.write_same16;
err = ahci_scsi_cmd_write_same(host, sdev->id,
rt_be64_to_cpu(write_same16->lba), rt_be32_to_cpu(write_same16->size));
}
break;
case RT_SCSI_CMD_READ_CAPACITY10:
{
rt_size_t last_block, block_size;
struct rt_scsi_read_capacity10_data *data = &cmd->data.read_capacity10;
err = ahci_scsi_cmd_read_capacity(host, sdev->id, &last_block, &block_size);
if (!err)
{
if (last_block > 0x100000000ULL)
{
last_block = 0xffffffff;
}
data->last_block = rt_cpu_to_be32(last_block);
data->block_size = rt_cpu_to_be32(block_size);
}
}
break;
case RT_SCSI_CMD_READ_CAPACITY16:
{
rt_size_t last_block, block_size;
struct rt_scsi_read_capacity16_data *data = &cmd->data.read_capacity16;
err = ahci_scsi_cmd_read_capacity(host, sdev->id, &last_block, &block_size);
if (!err)
{
data->last_block = rt_cpu_to_be64(last_block);
data->block_size = rt_cpu_to_be32(block_size);
}
}
break;
case RT_SCSI_CMD_TEST_UNIT_READY:
err = ahci_scsi_cmd_test_unit_ready(host, sdev->id);
break;
case RT_SCSI_CMD_INQUIRY:
{
struct rt_ahci_port *port = &host->ports[sdev->id];
struct rt_scsi_inquiry_data *inquiry = &cmd->data.inquiry;
err = ahci_scsi_cmd_inquiry(host, sdev->id,
inquiry->prodid, sizeof(inquiry->prodid),
inquiry->prodrev, sizeof(inquiry->prodrev));
if (!err)
{
rt_memcpy(inquiry->vendor, "ATA ", sizeof(inquiry->vendor));
if (HWREG32(port->regs + RT_AHCI_PORT_SIG) != RT_AHCI_PORT_SIG_SATA_CDROM)
{
port->block_size = 512;
inquiry->devtype = SCSI_DEVICE_TYPE_DIRECT;
}
else
{
port->block_size = 2048;
inquiry->devtype = SCSI_DEVICE_TYPE_CDROM;
}
inquiry->rmb = 0;
inquiry->length = 95 - 4;
}
}
break;
case RT_SCSI_CMD_MODE_SENSE:
case RT_SCSI_CMD_MODE_SENSE10:
case RT_SCSI_CMD_MODE_SELECT:
case RT_SCSI_CMD_MODE_SELECT10:
return -RT_ENOSYS;
default:
return -RT_EINVAL;
}
return err;
}
static struct rt_scsi_ops ahci_scsi_ops =
{
.transfer = ahci_scsi_transfer,
};
static void ahci_isr(int irqno, void *param)
{
int id;
rt_uint32_t isr;
rt_bitmap_t int_map;
struct rt_ahci_port *port;
struct rt_ahci_host *host = param;
int_map = HWREG32(host->regs + RT_AHCI_HBA_INTS);
rt_bitmap_for_each_set_bit(&int_map, id, host->ports_nr)
{
port = &host->ports[id];
isr = HWREG32(port->regs + RT_AHCI_PORT_INTS);
if (port->link)
{
if (host->ops->port_isr)
{
host->ops->port_isr(host, port, isr);
}
rt_completion_done(&port->done);
}
HWREG32(port->regs + RT_AHCI_PORT_INTS) = isr;
}
HWREG32(host->regs + RT_AHCI_HBA_INTS) = int_map;
}
rt_err_t rt_ahci_host_register(struct rt_ahci_host *host)
{
rt_err_t err;
rt_uint32_t value;
char dev_name[RT_NAME_MAX];
struct rt_scsi_host *scsi;
if (!host || !host->parent.dev || !host->ops)
{
return -RT_EINVAL;
}
host->max_blocks = host->max_blocks ? : 0x80;
/*
* 1. Reset HBA.
*/
err = -RT_EIO;
value = HWREG32(host->regs + RT_AHCI_HBA_GHC);
if (!(value & RT_AHCI_GHC_RESET))
{
HWREG32_FLUSH(host->regs + RT_AHCI_HBA_GHC, value | RT_AHCI_GHC_RESET);
}
for (int i = 0; i < 5; ++i)
{
rt_thread_mdelay(200);
if (!(HWREG32(host->regs + RT_AHCI_HBA_GHC) & RT_AHCI_GHC_RESET))
{
err = RT_EOK;
break;
}
}
if (err)
{
goto _fail;
}
/*
* 2. Enable AHCI and get the ports' information.
*/
HWREG32_FLUSH(host->regs + RT_AHCI_HBA_GHC, RT_AHCI_GHC_AHCI_EN);
host->cap = HWREG32(host->regs + RT_AHCI_HBA_CAP);
host->cap &= RT_AHCI_CAP_SPM | RT_AHCI_CAP_SSS | RT_AHCI_CAP_SIS;
HWREG32(host->regs + RT_AHCI_HBA_CAP) = host->cap;
host->cap = HWREG32(host->regs + RT_AHCI_HBA_CAP);
HWREG32_FLUSH(host->regs + RT_AHCI_HBA_PI, 0xf);
if (host->ops->host_init && (err = host->ops->host_init(host)))
{
goto _fail;
}
host->ports_nr = (host->cap & RT_AHCI_CAP_NP) + 1;
host->ports_map = HWREG32(host->regs + RT_AHCI_HBA_PI);
/* Check which ports the firmware reports as implemented */
rt_dm_dev_prop_read_u32(host->parent.dev, "ports-implemented", &host->ports_map);
for (int i = 0; i < host->ports_nr; ++i)
{
struct rt_ahci_port *port;
if (!(host->ports_map & RT_BIT(i)))
{
continue;
}
port = &host->ports[i];
/*
* 3. Alloc port io memory.
*/
port->regs = host->regs + 0x100 + (i * 0x80);
/*
* 4. Make port stop.
*/
value = HWREG32(port->regs + RT_AHCI_PORT_CMD);
if (value & (RT_AHCI_PORT_CMD_LIST_ON | RT_AHCI_PORT_CMD_FIS_ON |
RT_AHCI_PORT_CMD_FIS_RX | RT_AHCI_PORT_CMD_START))
{
value &= ~(RT_AHCI_PORT_CMD_LIST_ON | RT_AHCI_PORT_CMD_FIS_ON |
RT_AHCI_PORT_CMD_FIS_RX | RT_AHCI_PORT_CMD_START);
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CMD, value);
rt_thread_mdelay(500);
}
if (host->ops->port_init && (err = host->ops->port_init(host, port)))
{
LOG_E("Init port[%d] error = %s", rt_strerror(err));
continue;
}
value = HWREG32(port->regs + RT_AHCI_PORT_CMD);
value |= RT_AHCI_PORT_CMD_SPIN_UP;
HWREG32(port->regs + RT_AHCI_PORT_CMD) = value;
/*
* 5. Enable port's SATA link.
*/
if (host->ops->port_link_up)
{
err = host->ops->port_link_up(host, port);
}
else
{
err = -RT_ETIMEOUT;
for (int retry = 0; retry < 5; ++retry)
{
value = HWREG32(port->regs + RT_AHCI_PORT_SSTS);
if ((value & RT_AHCI_PORT_SSTS_DET_MASK) == RT_AHCI_PORT_SSTS_DET_PHYRDY)
{
err = RT_EOK;
break;
}
rt_thread_mdelay(2);
}
}
if (err)
{
if (HWREG32(port->regs + RT_AHCI_PORT_SSTS) & RT_AHCI_PORT_SSTS_DET_MASK)
{
LOG_E("SATA[%d] link error = %s", i, rt_strerror(err));
}
else
{
LOG_D("SATA[%d] not device", i);
}
continue;
}
/* Clear error status */
if ((value = HWREG32(port->regs + RT_AHCI_PORT_SERR)))
{
HWREG32(port->regs + RT_AHCI_PORT_SERR) = value;
}
for (int retry = 0; retry < 5; ++retry)
{
value = HWREG32(port->regs + RT_AHCI_PORT_TFD);
if (!(value & (RT_AHCI_PORT_TFDATA_BSY | RT_AHCI_PORT_TFDATA_DRQ)))
{
break;
}
rt_thread_mdelay(2);
value = HWREG32(port->regs + RT_AHCI_PORT_SSTS);
if ((value & RT_AHCI_PORT_SSTS_DET_MASK) == RT_AHCI_PORT_SSTS_DET_PHYRDY)
{
break;
}
}
value = HWREG32(port->regs + RT_AHCI_PORT_SSTS) & RT_AHCI_PORT_SSTS_DET_MASK;
if (value == RT_AHCI_PORT_SSTS_DET_COMINIT)
{
/* Retry to setup */
--i;
continue;
}
/* Clear error */
value = HWREG32(port->regs + RT_AHCI_PORT_SERR);
HWREG32(port->regs + RT_AHCI_PORT_SERR) = value;
/* Clear pending IRQ */
if ((value = HWREG32(port->regs + RT_AHCI_PORT_INTS)))
{
HWREG32(port->regs + RT_AHCI_PORT_INTS) = value;
}
HWREG32(host->regs + RT_AHCI_HBA_INTS) = RT_BIT(i);
value = HWREG32(port->regs + RT_AHCI_PORT_SSTS);
if ((value & RT_AHCI_PORT_SSTS_DET_MASK) == RT_AHCI_PORT_SSTS_DET_PHYRDY)
{
port->link = RT_TRUE;
}
}
HWREG32(host->regs + RT_AHCI_HBA_GHC) |= RT_AHCI_GHC_IRQ_EN;
for (int i = 0; i < host->ports_nr; ++i)
{
void *dma;
rt_ubase_t dma_addr;
rt_tick_t timeout;
struct rt_ahci_port *port = &host->ports[i];
if (!port->link)
{
continue;
}
/*
* 6. Alloc transport memory, Port x Command List and FIS Base Address.
*/
port->dma = rt_dma_alloc_coherent(host->parent.dev,
RT_AHCI_DMA_SIZE, &port->dma_handle);
if (!port->dma)
{
LOG_E("No memory to setup port[%d]", i);
break;
}
dma = port->dma;
rt_memset(dma, 0, RT_AHCI_DMA_SIZE);
port->cmd_slot = dma;
dma += (RT_AHCI_CMD_SLOT_SIZE + 224);
port->rx_fis = dma;
dma += RT_AHCI_RX_FIS_SIZE;
port->cmd_tbl = dma;
port->cmd_tbl_dma = (rt_ubase_t)rt_kmem_v2p(dma);
dma += RT_AHCI_CMD_TBL_HDR;
port->cmd_tbl_sg = dma;
dma_addr = (rt_ubase_t)rt_kmem_v2p(port->cmd_slot);
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CLB, rt_lower_32_bits(dma_addr));
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CLBU, rt_upper_32_bits(dma_addr));
dma_addr = (rt_ubase_t)rt_kmem_v2p(port->rx_fis);
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_FB, rt_lower_32_bits(dma_addr));
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_FBU, rt_upper_32_bits(dma_addr));
if (host->ops->port_dma_init && (err = host->ops->port_dma_init(host, port)))
{
LOG_E("Init port[%d] DMA error = %s", rt_strerror(err));
}
HWREG32_FLUSH(port->regs + RT_AHCI_PORT_CMD, RT_AHCI_PORT_CMD_ACTIVE |
RT_AHCI_PORT_CMD_FIS_RX | RT_AHCI_PORT_CMD_POWER_ON |
RT_AHCI_PORT_CMD_SPIN_UP | RT_AHCI_PORT_CMD_START);
/* Wait spinup */
err = -RT_ETIMEOUT;
timeout = rt_tick_from_millisecond(20000);
timeout += rt_tick_get();
do {
if (!(HWREG32(port->regs + RT_AHCI_PORT_TFD) & RT_AHCI_PORT_TFDATA_BSY))
{
err = RT_EOK;
break;
}
rt_hw_cpu_relax();
} while (rt_tick_get() < timeout);
if (err)
{
rt_dma_free_coherent(host->parent.dev, RT_AHCI_DMA_SIZE, port->dma,
port->dma_handle);
port->dma = RT_NULL;
LOG_E("Start up port[%d] fail", i);
continue;
}
port->int_enabled |= RT_AHCI_PORT_INTE_HBUS_ERR | RT_AHCI_PORT_INTE_IF_ERR |
RT_AHCI_PORT_INTE_CONNECT | RT_AHCI_PORT_INTE_PHYRDY |
RT_AHCI_PORT_INTE_UNK_FIS | RT_AHCI_PORT_INTE_BAD_PMP |
RT_AHCI_PORT_INTE_TF_ERR | RT_AHCI_PORT_INTE_HBUS_DATA_ERR |
RT_AHCI_PORT_INTE_SG_DONE | RT_AHCI_PORT_INTE_SDB_FIS |
RT_AHCI_PORT_INTE_DMAS_FIS | RT_AHCI_PORT_INTE_PIOS_FIS |
RT_AHCI_PORT_INTE_D2H_REG_FIS;
HWREG32(port->regs + RT_AHCI_PORT_INTE) = port->int_enabled;
rt_completion_init(&port->done);
}
rt_snprintf(dev_name, sizeof(dev_name), "ahci-%s",
rt_dm_dev_get_name(host->parent.dev));
rt_hw_interrupt_install(host->irq, ahci_isr, host, dev_name);
rt_hw_interrupt_umask(host->irq);
scsi = &host->parent;
scsi->max_lun = rt_max_t(rt_size_t, scsi->max_lun, 1);
scsi->max_id = host->ports_nr;
scsi->ops = &ahci_scsi_ops;
if ((err = rt_scsi_host_register(scsi)))
{
goto _fail;
}
return RT_EOK;
_fail:
rt_hw_interrupt_mask(host->irq);
rt_pic_detach_irq(host->irq, host);
return err;
}
rt_err_t rt_ahci_host_unregister(struct rt_ahci_host *host)
{
rt_err_t err;
struct rt_scsi_host *scsi;
if (!host)
{
return -RT_EINVAL;
}
scsi = &host->parent;
if ((err = rt_scsi_host_unregister(scsi)))
{
return err;
}
rt_hw_interrupt_mask(host->irq);
rt_pic_detach_irq(host->irq, host);
for (int i = 0; i < host->ports_nr; ++i)
{
struct rt_ahci_port *port = &host->ports[i];
if (port->ataid)
{
rt_free(port->ataid);
}
HWREG32(port->regs) &= ~(RT_AHCI_PORT_CMD_ACTIVE | RT_AHCI_PORT_CMD_POWER_ON |
RT_AHCI_PORT_CMD_SPIN_UP | RT_AHCI_PORT_CMD_START);
if (port->dma)
{
rt_dma_free_coherent(host->parent.dev, RT_AHCI_DMA_SIZE, port->dma,
port->dma_handle);
}
}
HWREG32(host->regs + RT_AHCI_HBA_GHC) &= ~(RT_AHCI_GHC_AHCI_EN | RT_AHCI_GHC_IRQ_EN);
return RT_EOK;
}
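
For a SATA controller that is not probed over PCI, the same registration path can be used directly. The sketch below assumes the HBA registers are already mapped and the IRQ number is known; only rt_ahci_host_register(), the rt_ahci_ops callbacks and the rt_ahci_host fields used above are taken from this file, everything else is hypothetical.

struct soc_ahci
{
    struct rt_ahci_host parent;
};

static rt_err_t soc_ahci_port_init(struct rt_ahci_host *host, struct rt_ahci_port *port)
{
    /* e.g. enable the port PHY or clocks here */
    return RT_EOK;
}

static const struct rt_ahci_ops soc_ahci_ops =
{
    .port_init = soc_ahci_port_init,
};

static rt_err_t soc_ahci_attach(struct rt_device *dev, void *regs, int irq)
{
    struct soc_ahci *soc = rt_calloc(1, sizeof(*soc));

    if (!soc)
    {
        return -RT_ENOMEM;
    }

    soc->parent.parent.dev = dev;   /* device used for naming and DMA allocation */
    soc->parent.regs = regs;        /* mapped HBA register base */
    soc->parent.irq = irq;
    soc->parent.ops = &soc_ahci_ops;

    return rt_ahci_host_register(&soc->parent);
}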

View File

@@ -0,0 +1,612 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017-05-09 Urey first version
* 2019-07-09 Zero-Free improve device ops interface and data flows
*/
#include <stdio.h>
#include <string.h>
#include <rthw.h>
#include <rtdevice.h>
#define DBG_TAG "audio"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
enum
{
REPLAY_EVT_NONE = 0x00,
REPLAY_EVT_START = 0x01,
REPLAY_EVT_STOP = 0x02,
};
static rt_err_t _audio_send_replay_frame(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
rt_uint8_t *data;
rt_size_t dst_size, src_size;
rt_uint16_t position, remain_bytes = 0, index = 0;
struct rt_audio_buf_info *buf_info;
RT_ASSERT(audio != RT_NULL);
buf_info = &audio->replay->buf_info;
/* save current pos */
position = audio->replay->pos;
dst_size = buf_info->block_size;
/* check replay queue is empty */
if (rt_data_queue_peek(&audio->replay->queue, (const void **)&data, &src_size) != RT_EOK)
{
/* ack stop event */
if (audio->replay->event & REPLAY_EVT_STOP)
rt_completion_done(&audio->replay->cmp);
/* send zero frames */
rt_memset(&buf_info->buffer[audio->replay->pos], 0, dst_size);
audio->replay->pos += dst_size;
audio->replay->pos %= buf_info->total_size;
}
else
{
rt_memset(&buf_info->buffer[audio->replay->pos], 0, dst_size);
/* copy data from memory pool to hardware device fifo */
while (index < dst_size)
{
result = rt_data_queue_peek(&audio->replay->queue, (const void **)&data, &src_size);
if (result != RT_EOK)
{
LOG_D("under run %d, remain %d", audio->replay->pos, remain_bytes);
audio->replay->pos -= remain_bytes;
audio->replay->pos += dst_size;
audio->replay->pos %= buf_info->total_size;
audio->replay->read_index = 0;
result = -RT_EEMPTY;
break;
}
remain_bytes = MIN((dst_size - index), (src_size - audio->replay->read_index));
rt_memcpy(&buf_info->buffer[audio->replay->pos],
&data[audio->replay->read_index], remain_bytes);
index += remain_bytes;
audio->replay->read_index += remain_bytes;
audio->replay->pos += remain_bytes;
audio->replay->pos %= buf_info->total_size;
if (audio->replay->read_index == src_size)
{
/* free memory */
audio->replay->read_index = 0;
rt_data_queue_pop(&audio->replay->queue, (const void **)&data, &src_size, RT_WAITING_NO);
rt_mp_free(data);
/* notify transmitted complete. */
if (audio->parent.tx_complete != RT_NULL)
audio->parent.tx_complete(&audio->parent, (void *)data);
}
}
}
if (audio->ops->transmit != RT_NULL)
{
if (audio->ops->transmit(audio, &buf_info->buffer[position], RT_NULL, dst_size) != dst_size)
result = -RT_ERROR;
}
return result;
}
static rt_err_t _audio_flush_replay_frame(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
if (audio->replay->write_index)
{
result = rt_data_queue_push(&audio->replay->queue,
(const void **)audio->replay->write_data,
audio->replay->write_index,
RT_WAITING_FOREVER);
audio->replay->write_index = 0;
}
return result;
}
static rt_err_t _aduio_replay_start(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
if (audio->replay->activated != RT_TRUE)
{
/* start playback hardware device */
if (audio->ops->start)
result = audio->ops->start(audio, AUDIO_STREAM_REPLAY);
audio->replay->activated = RT_TRUE;
LOG_D("start audio replay device");
}
return result;
}
static rt_err_t _aduio_replay_stop(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
if (audio->replay->activated == RT_TRUE)
{
/* flush the remaining replay frames */
_audio_flush_replay_frame(audio);
/* notify irq(or thread) to stop the data transmission */
audio->replay->event |= REPLAY_EVT_STOP;
/* waiting for the remaining data transfer to complete */
rt_completion_init(&audio->replay->cmp);
rt_completion_wait(&audio->replay->cmp, RT_WAITING_FOREVER);
audio->replay->event &= ~REPLAY_EVT_STOP;
/* stop playback hardware device */
if (audio->ops->stop)
result = audio->ops->stop(audio, AUDIO_STREAM_REPLAY);
audio->replay->activated = RT_FALSE;
LOG_D("stop audio replay device");
}
return result;
}
static rt_err_t _audio_record_start(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
if (audio->record->activated != RT_TRUE)
{
/* open audio record pipe */
rt_device_open(RT_DEVICE(&audio->record->pipe), RT_DEVICE_OFLAG_RDONLY);
/* start record hardware device */
if (audio->ops->start)
result = audio->ops->start(audio, AUDIO_STREAM_RECORD);
audio->record->activated = RT_TRUE;
LOG_D("start audio record device");
}
return result;
}
static rt_err_t _audio_record_stop(struct rt_audio_device *audio)
{
rt_err_t result = RT_EOK;
if (audio->record->activated == RT_TRUE)
{
/* stop record hardware device */
if (audio->ops->stop)
result = audio->ops->stop(audio, AUDIO_STREAM_RECORD);
/* close audio record pipe */
rt_device_close(RT_DEVICE(&audio->record->pipe));
audio->record->activated = RT_FALSE;
LOG_D("stop audio record device");
}
return result;
}
static rt_err_t _audio_dev_init(struct rt_device *dev)
{
rt_err_t result = RT_EOK;
struct rt_audio_device *audio;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
/* initialize replay & record */
audio->replay = RT_NULL;
audio->record = RT_NULL;
/* initialize replay */
if (dev->flag & RT_DEVICE_FLAG_WRONLY)
{
struct rt_audio_replay *replay = (struct rt_audio_replay *) rt_malloc(sizeof(struct rt_audio_replay));
if (replay == RT_NULL)
return -RT_ENOMEM;
rt_memset(replay, 0, sizeof(struct rt_audio_replay));
/* init memory pool for replay */
replay->mp = rt_mp_create("adu_mp", RT_AUDIO_REPLAY_MP_BLOCK_COUNT, RT_AUDIO_REPLAY_MP_BLOCK_SIZE);
if (replay->mp == RT_NULL)
{
rt_free(replay);
LOG_E("create memory pool for replay failed");
return -RT_ENOMEM;
}
/* init queue for audio replay */
rt_data_queue_init(&replay->queue, CFG_AUDIO_REPLAY_QUEUE_COUNT, 0, RT_NULL);
/* init mutex lock for audio replay */
rt_mutex_init(&replay->lock, "replay", RT_IPC_FLAG_PRIO);
replay->activated = RT_FALSE;
audio->replay = replay;
}
/* initialize record */
if (dev->flag & RT_DEVICE_FLAG_RDONLY)
{
struct rt_audio_record *record = (struct rt_audio_record *) rt_malloc(sizeof(struct rt_audio_record));
rt_uint8_t *buffer;
if (record == RT_NULL)
return -RT_ENOMEM;
rt_memset(record, 0, sizeof(struct rt_audio_record));
/* init pipe for record */
buffer = rt_malloc(RT_AUDIO_RECORD_PIPE_SIZE);
if (buffer == RT_NULL)
{
rt_free(record);
LOG_E("malloc memory for for record pipe failed");
return -RT_ENOMEM;
}
rt_audio_pipe_init(&record->pipe, "record",
(rt_int32_t)(RT_PIPE_FLAG_FORCE_WR | RT_PIPE_FLAG_BLOCK_RD),
buffer,
RT_AUDIO_RECORD_PIPE_SIZE);
record->activated = RT_FALSE;
audio->record = record;
}
/* initialize hardware configuration */
if (audio->ops->init)
audio->ops->init(audio);
/* get replay buffer information */
if (audio->ops->buffer_info)
audio->ops->buffer_info(audio, &audio->replay->buf_info);
return result;
}
static rt_err_t _audio_dev_open(struct rt_device *dev, rt_uint16_t oflag)
{
struct rt_audio_device *audio;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
/* check device flag with the open flag */
if ((oflag & RT_DEVICE_OFLAG_RDONLY) && !(dev->flag & RT_DEVICE_FLAG_RDONLY))
return -RT_EIO;
if ((oflag & RT_DEVICE_OFLAG_WRONLY) && !(dev->flag & RT_DEVICE_FLAG_WRONLY))
return -RT_EIO;
/* get open flags */
dev->open_flag = oflag & 0xff;
/* initialize the Rx/Tx structure according to open flag */
if (oflag & RT_DEVICE_OFLAG_WRONLY)
{
if (audio->replay->activated != RT_TRUE)
{
LOG_D("open audio replay device, oflag = %x\n", oflag);
audio->replay->write_index = 0;
audio->replay->read_index = 0;
audio->replay->pos = 0;
audio->replay->event = REPLAY_EVT_NONE;
}
dev->open_flag |= RT_DEVICE_OFLAG_WRONLY;
}
if (oflag & RT_DEVICE_OFLAG_RDONLY)
{
/* open record pipe */
if (audio->record->activated != RT_TRUE)
{
LOG_D("open audio record device ,oflag = %x\n", oflag);
_audio_record_start(audio);
audio->record->activated = RT_TRUE;
}
dev->open_flag |= RT_DEVICE_OFLAG_RDONLY;
}
return RT_EOK;
}
static rt_err_t _audio_dev_close(struct rt_device *dev)
{
struct rt_audio_device *audio;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
if (dev->open_flag & RT_DEVICE_OFLAG_WRONLY)
{
/* stop replay stream */
_aduio_replay_stop(audio);
dev->open_flag &= ~RT_DEVICE_OFLAG_WRONLY;
}
if (dev->open_flag & RT_DEVICE_OFLAG_RDONLY)
{
/* stop record stream */
_audio_record_stop(audio);
dev->open_flag &= ~RT_DEVICE_OFLAG_RDONLY;
}
return RT_EOK;
}
static rt_ssize_t _audio_dev_read(struct rt_device *dev, rt_off_t pos, void *buffer, rt_size_t size)
{
struct rt_audio_device *audio;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
if (!(dev->open_flag & RT_DEVICE_OFLAG_RDONLY) || (audio->record == RT_NULL))
return 0;
return rt_device_read(RT_DEVICE(&audio->record->pipe), pos, buffer, size);
}
static rt_ssize_t _audio_dev_write(struct rt_device *dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
struct rt_audio_device *audio;
rt_uint8_t *ptr;
rt_uint16_t block_size, remain_bytes, index = 0;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
if (!(dev->open_flag & RT_DEVICE_OFLAG_WRONLY) || (audio->replay == RT_NULL))
return 0;
/* push a new frame to replay data queue */
ptr = (rt_uint8_t *)buffer;
block_size = RT_AUDIO_REPLAY_MP_BLOCK_SIZE;
rt_mutex_take(&audio->replay->lock, RT_WAITING_FOREVER);
while (index < size)
{
/* request buffer from replay memory pool */
if (audio->replay->write_index % block_size == 0)
{
audio->replay->write_data = rt_mp_alloc(audio->replay->mp, RT_WAITING_FOREVER);
rt_memset(audio->replay->write_data, 0, block_size);
}
/* copy data to replay memory pool */
remain_bytes = MIN((block_size - audio->replay->write_index), (size - index));
rt_memcpy(&audio->replay->write_data[audio->replay->write_index], &ptr[index], remain_bytes);
index += remain_bytes;
audio->replay->write_index += remain_bytes;
audio->replay->write_index %= block_size;
if (audio->replay->write_index == 0)
{
rt_data_queue_push(&audio->replay->queue,
audio->replay->write_data,
block_size,
RT_WAITING_FOREVER);
}
}
rt_mutex_release(&audio->replay->lock);
/* check replay state */
if (audio->replay->activated != RT_TRUE)
{
_aduio_replay_start(audio);
audio->replay->activated = RT_TRUE;
}
return index;
}
static rt_err_t _audio_dev_control(struct rt_device *dev, int cmd, void *args)
{
rt_err_t result = RT_EOK;
struct rt_audio_device *audio;
RT_ASSERT(dev != RT_NULL);
audio = (struct rt_audio_device *) dev;
/* dev stat...*/
switch (cmd)
{
case AUDIO_CTL_GETCAPS:
{
struct rt_audio_caps *caps = (struct rt_audio_caps *) args;
LOG_D("AUDIO_CTL_GETCAPS: main_type = %d,sub_type = %d", caps->main_type, caps->sub_type);
if (audio->ops->getcaps != RT_NULL)
{
result = audio->ops->getcaps(audio, caps);
}
break;
}
case AUDIO_CTL_CONFIGURE:
{
struct rt_audio_caps *caps = (struct rt_audio_caps *) args;
LOG_D("AUDIO_CTL_CONFIGURE: main_type = %d,sub_type = %d", caps->main_type, caps->sub_type);
if (audio->ops->configure != RT_NULL)
{
result = audio->ops->configure(audio, caps);
}
break;
}
case AUDIO_CTL_START:
{
int stream = *(int *) args;
LOG_D("AUDIO_CTL_START: stream = %d", stream);
if (stream == AUDIO_STREAM_REPLAY)
{
result = _aduio_replay_start(audio);
}
else
{
result = _audio_record_start(audio);
}
break;
}
case AUDIO_CTL_STOP:
{
int stream = *(int *) args;
LOG_D("AUDIO_CTL_STOP: stream = %d", stream);
if (stream == AUDIO_STREAM_REPLAY)
{
result = _aduio_replay_stop(audio);
}
else
{
result = _audio_record_stop(audio);
}
break;
}
default:
break;
}
return result;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops audio_ops =
{
_audio_dev_init,
_audio_dev_open,
_audio_dev_close,
_audio_dev_read,
_audio_dev_write,
_audio_dev_control
};
#endif
rt_err_t rt_audio_register(struct rt_audio_device *audio, const char *name, rt_uint32_t flag, void *data)
{
rt_err_t result = RT_EOK;
struct rt_device *device;
RT_ASSERT(audio != RT_NULL);
device = &(audio->parent);
device->type = RT_Device_Class_Sound;
device->rx_indicate = RT_NULL;
device->tx_complete = RT_NULL;
#ifdef RT_USING_DEVICE_OPS
device->ops = &audio_ops;
#else
device->init = _audio_dev_init;
device->open = _audio_dev_open;
device->close = _audio_dev_close;
device->read = _audio_dev_read;
device->write = _audio_dev_write;
device->control = _audio_dev_control;
#endif
device->user_data = data;
/* register a character device */
result = rt_device_register(device, name, flag | RT_DEVICE_FLAG_REMOVABLE);
/* initialize audio device */
if (result == RT_EOK)
result = rt_device_init(device);
return result;
}
int rt_audio_samplerate_to_speed(rt_uint32_t bitValue)
{
int speed = 0;
switch (bitValue)
{
case AUDIO_SAMP_RATE_8K:
speed = 8000;
break;
case AUDIO_SAMP_RATE_11K:
speed = 11025;
break;
case AUDIO_SAMP_RATE_16K:
speed = 16000;
break;
case AUDIO_SAMP_RATE_22K:
speed = 22050;
break;
case AUDIO_SAMP_RATE_32K:
speed = 32000;
break;
case AUDIO_SAMP_RATE_44K:
speed = 44100;
break;
case AUDIO_SAMP_RATE_48K:
speed = 48000;
break;
case AUDIO_SAMP_RATE_96K:
speed = 96000;
break;
case AUDIO_SAMP_RATE_128K:
speed = 128000;
break;
case AUDIO_SAMP_RATE_160K:
speed = 160000;
break;
case AUDIO_SAMP_RATE_172K:
speed = 176400;
break;
case AUDIO_SAMP_RATE_192K:
speed = 192000;
break;
default:
break;
}
return speed;
}
void rt_audio_tx_complete(struct rt_audio_device *audio)
{
/* try to send next frame */
_audio_send_replay_frame(audio);
}
void rt_audio_rx_done(struct rt_audio_device *audio, rt_uint8_t *pbuf, rt_size_t len)
{
/* save data to record pipe */
rt_device_write(RT_DEVICE(&audio->record->pipe), 0, pbuf, len);
/* invoke callback */
if (audio->parent.rx_indicate != RT_NULL)
audio->parent.rx_indicate(&audio->parent, len);
}
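
From the application side the sound device is driven through the normal rt_device API; a minimal playback sketch follows. The device name "sound0" and the PCM buffer are assumptions, while the open flag, the write path and AUDIO_CTL_STOP usage follow the code above.

static void audio_play_demo(void)
{
    int stream = AUDIO_STREAM_REPLAY;
    static rt_uint8_t pcm[1024];            /* PCM frames prepared elsewhere */
    rt_device_t snd = rt_device_find("sound0");

    if (snd == RT_NULL)
        return;

    rt_device_open(snd, RT_DEVICE_OFLAG_WRONLY);

    /* _audio_dev_write() queues the data and starts replay automatically */
    rt_device_write(snd, 0, pcm, sizeof(pcm));

    /* stop replay explicitly; remaining queued frames are flushed first */
    rt_device_control(snd, AUDIO_CTL_STOP, &stream);
    rt_device_close(snd);
}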

View File

@@ -0,0 +1,296 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-30 Bernard first version.
*/
#include <rthw.h>
#include <rtdevice.h>
#include "dev_audio_pipe.h"
static void _rt_audio_pipe_resume_writer(struct rt_audio_pipe *pipe)
{
if (!rt_list_isempty(&pipe->suspended_write_list))
{
rt_thread_t thread;
RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_WR);
/* get suspended thread */
thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_write_list.next);
/* resume the write thread */
rt_thread_resume(thread);
rt_schedule();
}
}
static rt_ssize_t rt_audio_pipe_read(rt_device_t dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
rt_base_t level;
rt_thread_t thread;
struct rt_audio_pipe *pipe;
rt_size_t read_nbytes;
pipe = (struct rt_audio_pipe *)dev;
RT_ASSERT(pipe != RT_NULL);
if (!(pipe->flag & RT_PIPE_FLAG_BLOCK_RD))
{
level = rt_hw_interrupt_disable();
read_nbytes = rt_ringbuffer_get(&(pipe->ringbuffer), (rt_uint8_t *)buffer, size);
/* if the ringbuffer is empty, there won't be any writer waiting */
if (read_nbytes)
_rt_audio_pipe_resume_writer(pipe);
rt_hw_interrupt_enable(level);
return read_nbytes;
}
thread = rt_thread_self();
/* current context checking */
RT_DEBUG_NOT_IN_INTERRUPT;
do
{
level = rt_hw_interrupt_disable();
read_nbytes = rt_ringbuffer_get(&(pipe->ringbuffer), (rt_uint8_t *)buffer, size);
if (read_nbytes == 0)
{
rt_thread_suspend(thread);
/* waiting on suspended read list */
rt_list_insert_before(&(pipe->suspended_read_list),
&RT_THREAD_LIST_NODE(thread));
rt_hw_interrupt_enable(level);
rt_schedule();
}
else
{
_rt_audio_pipe_resume_writer(pipe);
rt_hw_interrupt_enable(level);
break;
}
}
while (read_nbytes == 0);
return read_nbytes;
}
static void _rt_audio_pipe_resume_reader(struct rt_audio_pipe *pipe)
{
if (pipe->parent.rx_indicate)
pipe->parent.rx_indicate(&pipe->parent,
rt_ringbuffer_data_len(&pipe->ringbuffer));
if (!rt_list_isempty(&pipe->suspended_read_list))
{
rt_thread_t thread;
RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_RD);
/* get suspended thread */
thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_read_list.next);
/* resume the read thread */
rt_thread_resume(thread);
rt_schedule();
}
}
static rt_ssize_t rt_audio_pipe_write(rt_device_t dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
rt_base_t level;
rt_thread_t thread;
struct rt_audio_pipe *pipe;
rt_size_t write_nbytes;
pipe = (struct rt_audio_pipe *)dev;
RT_ASSERT(pipe != RT_NULL);
if ((pipe->flag & RT_PIPE_FLAG_FORCE_WR) ||
!(pipe->flag & RT_PIPE_FLAG_BLOCK_WR))
{
level = rt_hw_interrupt_disable();
if (pipe->flag & RT_PIPE_FLAG_FORCE_WR)
write_nbytes = rt_ringbuffer_put_force(&(pipe->ringbuffer),
(const rt_uint8_t *)buffer, size);
else
write_nbytes = rt_ringbuffer_put(&(pipe->ringbuffer),
(const rt_uint8_t *)buffer, size);
_rt_audio_pipe_resume_reader(pipe);
rt_hw_interrupt_enable(level);
return write_nbytes;
}
thread = rt_thread_self();
/* current context checking */
RT_DEBUG_NOT_IN_INTERRUPT;
do
{
level = rt_hw_interrupt_disable();
write_nbytes = rt_ringbuffer_put(&(pipe->ringbuffer), (const rt_uint8_t *)buffer, size);
if (write_nbytes == 0)
{
/* pipe full, waiting on suspended write list */
rt_thread_suspend(thread);
/* waiting on suspended read list */
rt_list_insert_before(&(pipe->suspended_write_list),
&RT_THREAD_LIST_NODE(thread));
rt_hw_interrupt_enable(level);
rt_schedule();
}
else
{
_rt_audio_pipe_resume_reader(pipe);
rt_hw_interrupt_enable(level);
break;
}
}
while (write_nbytes == 0);
return write_nbytes;
}
static rt_err_t rt_audio_pipe_control(rt_device_t dev, int cmd, void *args)
{
struct rt_audio_pipe *pipe;
pipe = (struct rt_audio_pipe *)dev;
if (cmd == PIPE_CTRL_GET_SPACE && args)
*(rt_size_t *)args = rt_ringbuffer_space_len(&pipe->ringbuffer);
return RT_EOK;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops audio_pipe_ops =
{
RT_NULL,
RT_NULL,
RT_NULL,
rt_audio_pipe_read,
rt_audio_pipe_write,
rt_audio_pipe_control
};
#endif
/**
* This function will initialize a pipe device and put it under control of
* resource management.
*
* @param pipe the pipe device
* @param name the name of pipe device
* @param flag the attribute of the pipe device
* @param buf the buffer of pipe device
* @param size the size of pipe device buffer
*
* @return the operation status, RT_EOK on success
*/
rt_err_t rt_audio_pipe_init(struct rt_audio_pipe *pipe,
const char *name,
rt_int32_t flag,
rt_uint8_t *buf,
rt_size_t size)
{
RT_ASSERT(pipe);
RT_ASSERT(buf);
/* initialize suspended list */
rt_list_init(&pipe->suspended_read_list);
rt_list_init(&pipe->suspended_write_list);
/* initialize ring buffer */
rt_ringbuffer_init(&pipe->ringbuffer, buf, size);
pipe->flag = flag;
/* create pipe */
pipe->parent.type = RT_Device_Class_Pipe;
#ifdef RT_USING_DEVICE_OPS
pipe->parent.ops = &audio_pipe_ops;
#else
pipe->parent.init = RT_NULL;
pipe->parent.open = RT_NULL;
pipe->parent.close = RT_NULL;
pipe->parent.read = rt_audio_pipe_read;
pipe->parent.write = rt_audio_pipe_write;
pipe->parent.control = rt_audio_pipe_control;
#endif
return rt_device_register(&(pipe->parent), name, RT_DEVICE_FLAG_RDWR);
}
/**
* This function will detach a pipe device from resource management
*
* @param pipe the pipe device
*
* @return the operation status, RT_EOK on success
*/
rt_err_t rt_audio_pipe_detach(struct rt_audio_pipe *pipe)
{
return rt_device_unregister(&pipe->parent);
}
#ifdef RT_USING_HEAP
rt_err_t rt_audio_pipe_create(const char *name, rt_int32_t flag, rt_size_t size)
{
rt_uint8_t *rb_memptr = RT_NULL;
struct rt_audio_pipe *pipe = RT_NULL;
/* get aligned size */
size = RT_ALIGN(size, RT_ALIGN_SIZE);
pipe = (struct rt_audio_pipe *)rt_calloc(1, sizeof(struct rt_audio_pipe));
if (pipe == RT_NULL)
return -RT_ENOMEM;
/* create ring buffer of pipe */
rb_memptr = (rt_uint8_t *)rt_malloc(size);
if (rb_memptr == RT_NULL)
{
rt_free(pipe);
return -RT_ENOMEM;
}
return rt_audio_pipe_init(pipe, name, flag, rb_memptr, size);
}
void rt_audio_pipe_destroy(struct rt_audio_pipe *pipe)
{
if (pipe == RT_NULL)
return;
/* un-register pipe device */
rt_audio_pipe_detach(pipe);
/* release memory */
rt_free(pipe->ringbuffer.buffer_ptr);
rt_free(pipe);
return;
}
#endif /* RT_USING_HEAP */
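
The pipe can also be used on its own, the same way dev_audio.c wires it into the record path: a non-blocking, force-write producer (typically an ISR or DMA callback) and a blocking consumer. A small sketch, with the pipe name, buffer size and payload as assumptions:

static struct rt_audio_pipe demo_pipe;
static rt_uint8_t demo_pipe_buf[512];

static void audio_pipe_demo(void)
{
    rt_uint8_t out[64];

    /* writer never blocks and overwrites old data; reader blocks until data arrives */
    rt_audio_pipe_init(&demo_pipe, "dpipe",
                       (rt_int32_t)(RT_PIPE_FLAG_FORCE_WR | RT_PIPE_FLAG_BLOCK_RD),
                       demo_pipe_buf, sizeof(demo_pipe_buf));

    rt_device_write(RT_DEVICE(&demo_pipe), 0, "hello", 5);      /* producer side */
    rt_device_read(RT_DEVICE(&demo_pipe), 0, out, sizeof(out)); /* consumer side */

    rt_audio_pipe_detach(&demo_pipe);
}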

View File

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __DEV_AUDIO_PIPE_H__
#define __DEV_AUDIO_PIPE_H__
/**
* Pipe Device
*/
#include <rtdevice.h>
#ifndef RT_PIPE_BUFSZ
#define PIPE_BUFSZ 512
#else
#define PIPE_BUFSZ RT_PIPE_BUFSZ
#endif
/* portal device */
struct rt_audio_portal_device
{
struct rt_device parent;
struct rt_device *write_dev;
struct rt_device *read_dev;
};
enum rt_audio_pipe_flag
{
/* both read and write won't block */
RT_PIPE_FLAG_NONBLOCK_RDWR = 0x00,
/* read would block */
RT_PIPE_FLAG_BLOCK_RD = 0x01,
/* write would block */
RT_PIPE_FLAG_BLOCK_WR = 0x02,
/* a write to this pipe discards some data when the pipe is full.
* When this flag is set, RT_PIPE_FLAG_BLOCK_WR is ignored since the write
* operation always succeeds. */
RT_PIPE_FLAG_FORCE_WR = 0x04,
};
struct rt_audio_pipe
{
struct rt_device parent;
/* ring buffer in pipe device */
struct rt_ringbuffer ringbuffer;
rt_int32_t flag;
/* suspended list */
rt_list_t suspended_read_list;
rt_list_t suspended_write_list;
struct rt_audio_portal_device *write_portal;
struct rt_audio_portal_device *read_portal;
};
#define PIPE_CTRL_GET_SPACE 0x14 /**< get the remaining size of a pipe device */
rt_err_t rt_audio_pipe_init(struct rt_audio_pipe *pipe,
const char *name,
rt_int32_t flag,
rt_uint8_t *buf,
rt_size_t size);
rt_err_t rt_audio_pipe_detach(struct rt_audio_pipe *pipe);
#ifdef RT_USING_HEAP
rt_err_t rt_audio_pipe_create(const char *name, rt_int32_t flag, rt_size_t size);
void rt_audio_pipe_destroy(struct rt_audio_pipe *pipe);
#endif /* RT_USING_HEAP */
#endif /* __DEV_AUDIO_PIPE_H__ */

View File

@@ -0,0 +1,7 @@
menuconfig RT_USING_BLK
bool "Using Block device drivers"
default n
if RT_USING_BLK
rsource "partitions/Kconfig"
endif

View File

@@ -0,0 +1,23 @@
from building import *
group = []
objs = []
if not GetDepend(['RT_USING_BLK']):
Return('group')
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../include']
src = ['blk.c', 'blk_dev.c', 'blk_dfs.c', 'blk_partition.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')

View File

@@ -0,0 +1,573 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#define DBG_TAG "rtdm.blk"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "blk_dev.h"
#include "blk_dfs.h"
static void blk_remove_all(struct rt_blk_disk *disk)
{
struct rt_blk_device *blk, *blk_next;
/* Remove all partitions */
rt_list_for_each_entry_safe(blk, blk_next, &disk->part_nodes, list)
{
disk_remove_blk_dev(blk, RT_TRUE);
}
}
static rt_err_t blk_open(rt_device_t dev, rt_uint16_t oflag)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
if (disk->read_only && (oflag & RT_DEVICE_OFLAG_WRONLY))
{
return -RT_EINVAL;
}
return RT_EOK;
}
static rt_err_t blk_close(rt_device_t dev)
{
return RT_EOK;
}
static rt_ssize_t blk_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
rt_ssize_t res;
struct rt_blk_disk *disk = to_blk_disk(dev);
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
res = disk->ops->read(disk, sector, buffer, sector_count);
rt_sem_release(&disk->usr_lock);
return res;
}
static rt_ssize_t blk_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
rt_ssize_t res;
struct rt_blk_disk *disk = to_blk_disk(dev);
if (!disk->read_only)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
res = disk->ops->write(disk, sector, buffer, sector_count);
rt_sem_release(&disk->usr_lock);
return res;
}
return -RT_ENOSYS;
}
static rt_ssize_t blk_parallel_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
return disk->ops->read(disk, sector, buffer, sector_count);
}
static rt_ssize_t blk_parallel_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
if (!disk->read_only)
{
return disk->ops->write(disk, sector, buffer, sector_count);
}
return -RT_ENOSYS;
}
static rt_err_t blk_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t err;
struct rt_blk_disk *disk = to_blk_disk(dev);
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
if (args)
{
err = disk->ops->getgeome(disk, args);
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SYNC:
if (disk->ops->sync)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
spin_lock(&disk->lock);
err = disk->ops->sync(disk);
spin_unlock(&disk->lock);
rt_sem_release(&disk->usr_lock);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_ERASE:
if (disk->ops->erase)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
spin_lock(&disk->lock);
if (disk->parent.ref_count != 1)
{
err = -RT_EBUSY;
goto _unlock;
}
blk_remove_all(disk);
err = disk->ops->erase(disk);
_unlock:
spin_unlock(&disk->lock);
rt_sem_release(&disk->usr_lock);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_AUTOREFRESH:
if (disk->ops->autorefresh)
{
err = disk->ops->autorefresh(disk, !!args);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_PARTITION:
err = -RT_EINVAL;
break;
case RT_DEVICE_CTRL_BLK_SSIZEGET:
device_get_blk_ssize(dev, args);
err = RT_EOK;
break;
case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
device_get_all_blk_ssize(dev, args);
err = RT_EOK;
break;
default:
if (disk->ops->control)
{
err = disk->ops->control(disk, RT_NULL, cmd, args);
}
else
{
err = -RT_ENOSYS;
}
break;
}
return err;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops blk_ops =
{
.open = blk_open,
.close = blk_close,
.read = blk_read,
.write = blk_write,
.control = blk_control,
};
const static struct rt_device_ops blk_parallel_ops =
{
.open = blk_open,
.close = blk_close,
.read = blk_parallel_read,
.write = blk_parallel_write,
.control = blk_control,
};
#endif /* RT_USING_DEVICE_OPS */
rt_err_t rt_hw_blk_disk_register(struct rt_blk_disk *disk)
{
rt_err_t err;
#ifdef RT_USING_DM
int device_id;
#endif
const char *disk_name;
rt_uint16_t flags = RT_DEVICE_FLAG_RDONLY;
if (!disk || !disk->ops)
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if (!disk->ida)
{
return -RT_EINVAL;
}
#endif
#if RT_NAME_MAX > 0
if (disk->parent.parent.name[0] == '\0')
#else
if (disk->parent.parent.name)
#endif
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if ((device_id = rt_dm_ida_alloc(disk->ida)) < 0)
{
return -RT_EFULL;
}
#endif
disk->__magic = RT_BLK_DISK_MAGIC;
disk_name = to_disk_name(disk);
err = rt_sem_init(&disk->usr_lock, disk_name, 1, RT_IPC_FLAG_PRIO);
if (err)
{
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, device_id);
#endif
LOG_E("%s: Init user mutex error = %s", rt_strerror(err));
return err;
}
rt_list_init(&disk->part_nodes);
rt_spin_lock_init(&disk->lock);
disk->parent.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
if (disk->parallel_io)
{
disk->parent.ops = &blk_parallel_ops;
}
else
{
disk->parent.ops = &blk_ops;
}
#else
disk->parent.open = blk_open;
disk->parent.close = blk_close;
if (disk->parallel_io)
{
disk->parent.read = blk_parallel_read;
disk->parent.write = blk_parallel_write;
}
else
{
disk->parent.read = blk_read;
disk->parent.write = blk_write;
}
disk->parent.control = blk_control;
#endif
if (!disk->ops->write)
{
disk->read_only = RT_TRUE;
}
if (!disk->read_only)
{
flags |= RT_DEVICE_FLAG_WRONLY;
}
#ifdef RT_USING_DM
disk->parent.master_id = disk->ida->master_id;
disk->parent.device_id = device_id;
#endif
device_set_blk_fops(&disk->parent);
err = rt_device_register(&disk->parent, disk_name, flags);
if (err)
{
rt_sem_detach(&disk->usr_lock);
}
/* Ignore partition scanning errors */
rt_blk_disk_probe_partition(disk);
return err;
}
rt_err_t rt_hw_blk_disk_unregister(struct rt_blk_disk *disk)
{
rt_err_t err;
if (!disk)
{
return -RT_EINVAL;
}
spin_lock(&disk->lock);
if (disk->parent.ref_count > 0)
{
err = -RT_EBUSY;
goto _unlock;
}
/* Flush all data */
if (disk->ops->sync)
{
err = disk->ops->sync(disk);
if (err)
{
LOG_E("%s: Sync error = %s", to_disk_name(disk), rt_strerror(err));
goto _unlock;
}
}
rt_sem_detach(&disk->usr_lock);
blk_remove_all(disk);
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, disk->parent.device_id);
#endif
err = rt_device_unregister(&disk->parent);
_unlock:
spin_unlock(&disk->lock);
return err;
}
rt_ssize_t rt_blk_disk_get_capacity(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct rt_device_blk_geometry geometry;
if (!disk)
{
return -RT_EINVAL;
}
res = disk->ops->getgeome(disk, &geometry);
if (!res)
{
return geometry.sector_count;
}
return res;
}
rt_ssize_t rt_blk_disk_get_logical_block_size(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct rt_device_blk_geometry geometry;
if (!disk)
{
return -RT_EINVAL;
}
res = disk->ops->getgeome(disk, &geometry);
if (!res)
{
return geometry.bytes_per_sector;
}
return res;
}
#ifdef RT_USING_DFS_MNTTABLE
static int blk_dfs_mnt_table(void)
{
rt_ubase_t level;
struct rt_object *obj;
struct rt_device *dev;
struct rt_blk_disk *disk;
struct rt_blk_device *blk_dev;
struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Device);
level = rt_hw_interrupt_disable();
rt_list_for_each_entry(obj, &info->object_list, list)
{
dev = rt_container_of(obj, struct rt_device, parent);
if (dev->type != RT_Device_Class_Block)
{
continue;
}
disk = to_blk_disk(dev);
if (disk->__magic != RT_BLK_DISK_MAGIC)
{
continue;
}
if (disk->max_partitions == RT_BLK_PARTITION_NONE)
{
dfs_mount_device(&disk->parent);
continue;
}
rt_list_for_each_entry(blk_dev, &disk->part_nodes, list)
{
dfs_mount_device(&blk_dev->parent);
}
}
rt_hw_interrupt_enable(level);
return 0;
}
INIT_ENV_EXPORT(blk_dfs_mnt_table);
#endif /* RT_USING_DFS_MNTTABLE */
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
const char *convert_size(struct rt_device_blk_geometry *geome,
rt_size_t sector_count, rt_size_t *out_cap, rt_size_t *out_minor)
{
rt_size_t cap, minor = 0;
int size_index = 0;
const char *size_name[] = { "B", "K", "M", "G", "T", "P", "E" };
cap = geome->bytes_per_sector * sector_count;
for (size_index = 0; size_index < RT_ARRAY_SIZE(size_name) - 1; ++size_index)
{
if (cap < 1024)
{
break;
}
/* Only one decimal point */
minor = (cap % 1024) * 10 / 1024;
cap = cap / 1024;
}
*out_cap = cap;
*out_minor = minor;
return size_name[size_index];
}
static int list_blk(int argc, char**argv)
{
rt_ubase_t level;
rt_size_t cap, minor;
const char *size_name;
struct rt_object *obj;
struct rt_device *dev;
struct rt_blk_disk *disk;
struct rt_blk_device *blk_dev;
struct rt_device_blk_geometry geome;
struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Device);
level = rt_hw_interrupt_disable();
rt_kprintf("%-*.s MAJ:MIN RM SIZE\tRO TYPE MOUNTPOINT\n", RT_NAME_MAX, "NAME");
rt_list_for_each_entry(obj, &info->object_list, list)
{
dev = rt_container_of(obj, struct rt_device, parent);
if (dev->type != RT_Device_Class_Block)
{
continue;
}
disk = to_blk_disk(dev);
if (disk->__magic != RT_BLK_DISK_MAGIC)
{
continue;
}
if (disk->ops->getgeome(disk, &geome))
{
continue;
}
size_name = convert_size(&geome, geome.sector_count, &cap, &minor);
rt_kprintf("%-*.s %3u.%-3u %u %u.%u%s\t%u disk %s\n",
RT_NAME_MAX, to_disk_name(disk),
#ifdef RT_USING_DM
disk->parent.master_id, disk->parent.device_id,
#else
0, 0,
#endif
disk->removable, cap, minor, size_name, disk->read_only,
disk->max_partitions != RT_BLK_PARTITION_NONE ? "\b" :
(dfs_filesystem_get_mounted_path(&disk->parent) ? : "\b"));
rt_list_for_each_entry(blk_dev, &disk->part_nodes, list)
{
size_name = convert_size(&geome, blk_dev->sector_count, &cap, &minor);
rt_kprintf("%c--%-*.s %3u.%-3u %u %u.%u%s\t%u part %s\n",
blk_dev->list.next != &disk->part_nodes ? '|' : '`',
RT_NAME_MAX - 3, to_blk_name(blk_dev),
#ifdef RT_USING_DM
blk_dev->parent.master_id, blk_dev->parent.device_id,
#else
0, 0,
#endif
disk->removable, cap, minor, size_name, disk->read_only,
dfs_filesystem_get_mounted_path(&blk_dev->parent) ? : "");
}
}
rt_hw_interrupt_enable(level);
return 0;
}
MSH_CMD_EXPORT(list_blk, dump all block device information);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */

View File

@@ -0,0 +1,297 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#include "blk_dev.h"
#include "blk_dfs.h"
#define DBG_TAG "blk.dm"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_DFS
#include <dfs_fs.h>
#endif
static rt_err_t blk_dev_open(rt_device_t dev, rt_uint16_t oflag)
{
struct rt_blk_device *blk = to_blk(dev);
return rt_device_open(&blk->disk->parent, oflag);
}
static rt_err_t blk_dev_close(rt_device_t dev)
{
struct rt_blk_device *blk = to_blk(dev);
return rt_device_close(&blk->disk->parent);
}
static rt_ssize_t blk_dev_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
struct rt_blk_device *blk = to_blk(dev);
/* 'sector' is relative to the start of this partition */
if (sector + sector_count <= blk->sector_count)
{
return rt_device_read(&blk->disk->parent,
blk->sector_start + sector, buffer, sector_count);
}
return -RT_EINVAL;
}
static rt_ssize_t blk_dev_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
struct rt_blk_device *blk = to_blk(dev);
if (sector + sector_count <= blk->sector_count)
{
return rt_device_write(&blk->disk->parent,
blk->sector_start + sector, buffer, sector_count);
}
return -RT_EINVAL;
}
static rt_err_t blk_dev_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t err = -RT_EINVAL;
struct rt_blk_device *blk = to_blk(dev);
struct rt_blk_disk *disk = blk->disk;
struct rt_device_blk_geometry disk_geometry, *geometry;
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
if ((geometry = args))
{
if (!(err = disk->ops->getgeome(disk, &disk_geometry)))
{
geometry->bytes_per_sector = disk_geometry.bytes_per_sector;
geometry->block_size = disk_geometry.block_size;
geometry->sector_count = blk->sector_count;
}
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SYNC:
rt_device_control(&disk->parent, cmd, args);
break;
case RT_DEVICE_CTRL_BLK_ERASE:
case RT_DEVICE_CTRL_BLK_AUTOREFRESH:
if (disk->partitions <= 1)
{
rt_device_control(&disk->parent, cmd, args);
}
else
{
err = -RT_EIO;
}
break;
case RT_DEVICE_CTRL_BLK_PARTITION:
if (args)
{
rt_memcpy(args, &blk->partition, sizeof(blk->partition));
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SSIZEGET:
device_get_blk_ssize(dev, args);
err = RT_EOK;
break;
case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
device_get_all_blk_ssize(dev, args);
err = RT_EOK;
break;
default:
if (disk->ops->control)
{
err = disk->ops->control(disk, blk, cmd, args);
}
break;
}
return err;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops blk_dev_ops =
{
.open = blk_dev_open,
.close = blk_dev_close,
.read = blk_dev_read,
.write = blk_dev_write,
.control = blk_dev_control,
};
#endif
rt_err_t blk_dev_initialize(struct rt_blk_device *blk)
{
struct rt_device *dev;
if (!blk)
{
return -RT_EINVAL;
}
dev = &blk->parent;
dev->type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
dev->ops = &blk_dev_ops;
#else
dev->open = blk_dev_open;
dev->close = blk_dev_close;
dev->read = blk_dev_read;
dev->write = blk_dev_write;
dev->control = blk_dev_control;
#endif
return RT_EOK;
}
rt_err_t disk_add_blk_dev(struct rt_blk_disk *disk, struct rt_blk_device *blk)
{
rt_err_t err;
#ifdef RT_USING_DM
int device_id;
#endif
const char *disk_name, *name_fmt;
if (!disk || !blk)
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if ((device_id = rt_dm_ida_alloc(disk->ida)) < 0)
{
return -RT_EFULL;
}
#endif
blk->disk = disk;
rt_list_init(&blk->list);
disk_name = to_disk_name(disk);
/* if the disk name ends before 'a' (a digit or an upper-case letter), insert 'p' before the partition number (e.g. "disk0" -> "disk0p1"); otherwise append it directly (e.g. "sda" -> "sda1") */
if (disk_name[rt_strlen(disk_name) - 1] < 'a')
{
name_fmt = "%sp%d";
}
else
{
name_fmt = "%s%d";
}
#ifdef RT_USING_DM
rt_dm_dev_set_name(&blk->parent, name_fmt, disk_name, blk->partno);
blk->parent.master_id = disk->ida->master_id;
blk->parent.device_id = device_id;
#else
rt_snprintf(blk->parent.parent.name, RT_NAME_MAX, name_fmt, disk_name, blk->partno);
#endif
device_set_blk_fops(&blk->parent);
err = rt_device_register(&blk->parent, to_blk_name(blk),
disk->parent.flag & RT_DEVICE_FLAG_RDWR);
if (err)
{
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, device_id);
#endif
return err;
}
spin_lock(&disk->lock);
rt_list_insert_before(&disk->part_nodes, &blk->list);
spin_unlock(&disk->lock);
return RT_EOK;
}
rt_err_t disk_remove_blk_dev(struct rt_blk_device *blk, rt_bool_t lockless)
{
struct rt_blk_disk *disk;
if (!blk)
{
return -RT_EINVAL;
}
disk = blk->disk;
if (!disk)
{
return -RT_EINVAL;
}
else
{
#ifdef RT_USING_DFS
const char *mountpath;
if ((mountpath = dfs_filesystem_get_mounted_path(&blk->parent)))
{
dfs_unmount(mountpath);
LOG_D("%s: Unmount file system on %s",
to_blk_name(blk), mountpath);
}
#endif
}
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, blk->parent.device_id);
#endif
rt_device_unregister(&blk->parent);
if (!lockless)
{
spin_lock(&disk->lock);
}
rt_list_remove(&blk->list);
if (!lockless)
{
spin_unlock(&disk->lock);
}
--disk->partitions;
return RT_EOK;
}
rt_uint32_t blk_request_ioprio(void)
{
struct rt_thread *task = rt_thread_self();
return task ? RT_SCHED_PRIV(task).current_priority : 0;
}

View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_DEV_H__
#define __BLK_DEV_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/blk.h>
#include <drivers/misc.h>
#define to_blk_disk(dev) rt_container_of(dev, struct rt_blk_disk, parent)
#define to_blk(dev) rt_container_of(dev, struct rt_blk_device, parent)
#ifdef RT_USING_DM
#define to_disk_name(disk) rt_dm_dev_get_name(&(disk)->parent)
#define to_blk_name(blk) rt_dm_dev_get_name(&(blk)->parent)
#else
#define to_disk_name(disk) (disk)->parent.parent.name
#define to_blk_name(blk) (blk)->parent.parent.name
#endif
/* expand a disk index into one or two letters for a "%c%c" format: 0 -> "a", 25 -> "z", 26 -> "aa", 52 -> "ba" */
#define letter_name(n) ('a' + (n) / ((n) >= 26 ? (26 * 2) : 1)), ((n) >= 26 ? 'a' + (n) % 26 : '\0')
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
rt_hw_spin_lock(&spinlock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
rt_hw_spin_unlock(&spinlock->lock);
}
rt_err_t blk_dev_initialize(struct rt_blk_device *blk);
rt_err_t disk_add_blk_dev(struct rt_blk_disk *disk, struct rt_blk_device *blk);
rt_err_t disk_remove_blk_dev(struct rt_blk_device *blk, rt_bool_t lockless);
rt_uint32_t blk_request_ioprio(void);
#endif /* __BLK_DEV_H__ */

View File

@@ -0,0 +1,274 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-08 GuEe-GUI first version
*/
#include "blk_dfs.h"
#include <dfs_file.h>
#include <drivers/classes/block.h>
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_DFS_V2)
struct blk_fops_data
{
struct rt_device_blk_geometry geometry;
};
static int blk_fops_open(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = rt_malloc(sizeof(*data));
if (!data)
{
return (int)-RT_ENOMEM;
}
dev->user_data = data;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &data->geometry);
rt_device_control(dev, RT_DEVICE_CTRL_ALL_BLK_SSIZEGET, &file->vnode->size);
return 0;
}
static int blk_fops_close(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
rt_free(dev->user_data);
dev->user_data = RT_NULL;
return 0;
}
static int blk_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
struct rt_device *dev = file->vnode->data;
return (int)rt_device_control(dev, cmd, arg);
}
static ssize_t blk_fops_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
void *rbuf;
rt_ssize_t res = 0;
int bytes_per_sector, blk_pos, first_offs, rsize = 0;
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = dev->user_data;
bytes_per_sector = data->geometry.bytes_per_sector;
blk_pos = *pos / bytes_per_sector;
first_offs = *pos % bytes_per_sector;
if ((rbuf = rt_malloc(bytes_per_sector)))
{
/*
** #1: read the first, possibly unaligned, part of a sector.
*/
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
if (count > bytes_per_sector - first_offs)
{
rsize = bytes_per_sector - first_offs;
}
else
{
rsize = count;
}
rt_memcpy(buf, rbuf + first_offs, rsize);
++blk_pos;
/*
** #2: read the remaining data sector by sector.
*/
while (rsize < count)
{
res = rt_device_read(dev, blk_pos++, rbuf, 1);
if (res != 1)
{
break;
}
if (count - rsize >= bytes_per_sector)
{
rt_memcpy(buf + rsize, rbuf, bytes_per_sector);
rsize += bytes_per_sector;
}
else
{
rt_memcpy(buf + rsize, rbuf, count - rsize);
rsize = count;
}
}
*pos += rsize;
}
rt_free(rbuf);
}
return rsize;
}
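/*
 * Illustrative example for the write path below: with bytes_per_sector = 512,
 * *pos = 500 and count = 1100, step #1 read-modify-writes 12 bytes into the
 * first sector, step #2 writes 2 whole sectors directly, and step #3
 * read-modify-writes the remaining 64 bytes into the last sector.
 */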
static ssize_t blk_fops_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
void *rbuf;
rt_ssize_t res = 0;
int bytes_per_sector, blk_pos, first_offs, wsize = 0;
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = dev->user_data;
bytes_per_sector = data->geometry.bytes_per_sector;
blk_pos = *pos / bytes_per_sector;
first_offs = *pos % bytes_per_sector;
/*
** #1: read-modify-write the first, unaligned, part of a sector.
*/
if (first_offs != 0)
{
if (count > bytes_per_sector - first_offs)
{
wsize = bytes_per_sector - first_offs;
}
else
{
wsize = count;
}
if ((rbuf = rt_malloc(bytes_per_sector)))
{
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
rt_memcpy(rbuf + first_offs, buf, wsize);
res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);
if (res == 1)
{
blk_pos += 1;
rt_free(rbuf);
goto _goon;
}
}
rt_free(rbuf);
}
return 0;
}
_goon:
/*
** #2: write the whole sectors in the middle directly.
*/
if ((count - wsize) / bytes_per_sector != 0)
{
/* number of whole sectors still to write; computed once so the check below is not skewed by the wsize update */
int whole_sectors = (count - wsize) / bytes_per_sector;
res = rt_device_write(dev, blk_pos, buf + wsize, whole_sectors);
wsize += res * bytes_per_sector;
blk_pos += res;
if (res != whole_sectors)
{
*pos += wsize;
return wsize;
}
}
/*
** #3: read-modify-write the trailing, unaligned, part of the last sector.
*/
if ((count - wsize) != 0)
{
if ((rbuf = rt_malloc(bytes_per_sector)))
{
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
rt_memcpy(rbuf, buf + wsize, count - wsize);
res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);
if (res == 1)
{
wsize += count - wsize;
}
}
rt_free(rbuf);
}
}
*pos += wsize;
return wsize;
}
static int blk_fops_flush(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
return (int)rt_device_control(dev, RT_DEVICE_CTRL_BLK_SYNC, RT_NULL);
}
static int blk_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
int mask = 0;
return mask;
}
const static struct dfs_file_ops blk_fops =
{
.open = blk_fops_open,
.close = blk_fops_close,
.ioctl = blk_fops_ioctl,
.read = blk_fops_read,
.write = blk_fops_write,
.flush = blk_fops_flush,
.lseek = generic_dfs_lseek,
.poll = blk_fops_poll
};
void device_set_blk_fops(struct rt_device *dev)
{
dev->fops = &blk_fops;
}
#else
void device_set_blk_fops(struct rt_device *dev)
{
}
#endif /* RT_USING_POSIX_DEVIO && RT_USING_DFS_V2 */
void device_get_blk_ssize(struct rt_device *dev, void *args)
{
rt_uint32_t bytes_per_sector;
struct rt_device_blk_geometry geometry;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
bytes_per_sector = geometry.bytes_per_sector;
RT_ASSERT(sizeof(bytes_per_sector) == sizeof(geometry.bytes_per_sector));
rt_memcpy(args, &bytes_per_sector, sizeof(bytes_per_sector));
}
void device_get_all_blk_ssize(struct rt_device *dev, void *args)
{
rt_uint64_t count_mul_per;
struct rt_device_blk_geometry geometry;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
count_mul_per = geometry.bytes_per_sector * geometry.sector_count;
rt_memcpy(args, &count_mul_per, sizeof(count_mul_per));
}

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-08 GuEe-GUI first version
*/
#ifndef __BLK_DFS_H__
#define __BLK_DFS_H__
#include <rtdef.h>
#define RT_DEVICE_CTRL_BLK_SSIZEGET 0x00001268 /**< get number of bytes per sector */
#define RT_DEVICE_CTRL_ALL_BLK_SSIZEGET 0x80081272 /**< get number of bytes per sector * sector counts */
void device_set_blk_fops(struct rt_device *dev);
void device_get_blk_ssize(struct rt_device *dev, void *args);
void device_get_all_blk_ssize(struct rt_device *dev, void *args);
#endif /* __BLK_DFS_H__ */

View File

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#define DBG_TAG "blk.part"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "blk_partition.h"
static rt_err_t (*partition_list[])(struct rt_blk_disk *) =
{
#ifdef RT_BLK_PARTITION_EFI
efi_partition,
#endif
#ifdef RT_BLK_PARTITION_DFS
dfs_partition,
#endif
};
rt_err_t blk_put_partition(struct rt_blk_disk *disk, const char *type,
rt_size_t start, rt_size_t count, int partno)
{
rt_err_t err;
struct rt_blk_device *blk = rt_calloc(1, sizeof(*blk));
if (type && rt_strcmp(type, "dfs"))
{
rt_uint32_t ssz = rt_blk_disk_get_logical_block_size(disk);
rt_kprintf("found part[%u], begin: %lu, size: ", partno, start * ssz);
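/* note: the size report below assumes 512-byte sectors: count >> 1 is KiB, count >> 11 is MiB */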
if ((count >> 11) == 0)
{
rt_kprintf("%u%cB\n", count >> 1, 'K'); /* KB */
}
else
{
rt_uint32_t size_mb = count >> 11; /* MB */
if ((size_mb >> 10) == 0)
{
rt_kprintf("%u.%u%cB\n", size_mb, (count >> 1) & 0x3ff, 'M');
}
else
{
rt_kprintf("%u.%u%cB\n", size_mb >> 10, size_mb & 0x3ff, 'G');
}
}
}
if (!blk)
{
err = -RT_ENOMEM;
goto _fail;
}
err = blk_dev_initialize(blk);
if (err)
{
goto _fail;
}
blk->partno = partno;
blk->sector_start = start;
blk->sector_count = count;
blk->partition.offset = start;
blk->partition.size = count;
blk->partition.lock = &disk->usr_lock;
err = disk_add_blk_dev(disk, blk);
if (err)
{
goto _fail;
}
++disk->partitions;
return RT_EOK;
_fail:
LOG_E("%s: Put partition.%s[%u] start = %lu count = %lu error = %s",
to_disk_name(disk), type, partno, start, count, rt_strerror(err));
if (blk)
{
rt_free(blk);
}
return err;
}
rt_err_t rt_blk_disk_probe_partition(struct rt_blk_disk *disk)
{
rt_err_t err = RT_EOK;
if (!disk)
{
return -RT_EINVAL;
}
LOG_D("%s: Probing disk partitions", to_disk_name(disk));
if (disk->partitions)
{
return err;
}
err = -RT_EEMPTY;
if (disk->max_partitions == RT_BLK_PARTITION_NONE)
{
LOG_D("%s: Unsupported partitions", to_disk_name(disk));
return err;
}
for (int i = 0; i < RT_ARRAY_SIZE(partition_list); ++i)
{
rt_err_t part_err = partition_list[i](disk);
if (part_err == -RT_ENOMEM)
{
err = part_err;
break;
}
if (!part_err)
{
err = RT_EOK;
break;
}
}
if ((err && err != -RT_ENOMEM) || disk->partitions == 0)
{
/* No partition found */
rt_size_t total_sectors = rt_blk_disk_get_capacity(disk);
err = blk_put_partition(disk, RT_NULL, 0, total_sectors, 0);
}
return err;
}

View File

@@ -0,0 +1,22 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_PARTITION_H__
#define __BLK_PARTITION_H__
#include "blk_dev.h"
rt_err_t blk_put_partition(struct rt_blk_disk *disk, const char *type,
rt_size_t start, rt_size_t count, int partno);
rt_err_t dfs_partition(struct rt_blk_disk *disk);
rt_err_t efi_partition(struct rt_blk_disk *disk);
#endif /* __BLK_PARTITION_H__ */

View File

@@ -0,0 +1,12 @@
menu "Partition Types"
config RT_BLK_PARTITION_DFS
bool "DFS Partition support"
depends on RT_USING_DFS
default y
config RT_BLK_PARTITION_EFI
bool "EFI Globally Unique Identifier (GUID) Partition support"
default y
endmenu

View File

@@ -0,0 +1,18 @@
from building import *
group = []
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = []
if GetDepend(['RT_BLK_PARTITION_DFS']):
src += ['dfs.c']
if GetDepend(['RT_BLK_PARTITION_EFI']):
src += ['efi.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#include "efi.h"
#define DBG_TAG "blk.part.dfs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
rt_err_t dfs_partition(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct dfs_partition part;
rt_uint8_t *sector = rt_malloc(rt_blk_disk_get_logical_block_size(disk));
if (!sector)
{
return -RT_ENOMEM;
}
res = disk->ops->read(disk, 0, sector, 1);
if (res < 0)
{
rt_free(sector);
return res;
}
for (rt_size_t i = 0; i < disk->max_partitions; ++i)
{
res = dfs_filesystem_get_partition(&part, sector, i);
if (res)
{
break;
}
if (blk_put_partition(disk, "dfs", part.offset, part.size, i) == -RT_ENOMEM)
{
break;
}
}
rt_free(sector);
return RT_EOK;
}

View File

@@ -0,0 +1,738 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#include "efi.h"
#define DBG_TAG "blk.part.efi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static rt_bool_t force_gpt = 0;
static int force_gpt_setup(void)
{
#ifdef RT_USING_OFW
force_gpt = !!rt_ofw_bootargs_select("gpt", 0);
#endif
return 0;
}
INIT_CORE_EXPORT(force_gpt_setup);
/**
* @brief This function is the EFI version of the CRC32 function.
*
* @param buf the buffer to calculate crc32 of.
* @param len the length of buf.
* @return EFI-style CRC32 value for @buf.
*/
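/* this is the standard reflected CRC-32 (polynomial 0xEDB88320, init 0xFFFFFFFF, final complement) as required by the UEFI specification */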
rt_inline rt_uint32_t efi_crc32(const rt_uint8_t *buf, rt_size_t len)
{
rt_ubase_t crc = 0xffffffffUL;
for (rt_size_t i = 0; i < len; ++i)
{
crc ^= buf[i];
for (int j = 0; j < 8; ++j)
{
crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320L : 0);
}
}
return ~crc;
}
/**
* @brief This function gets the number of the last logical block of the device.
*
* @param disk the blk of disk.
* @return the last LBA value (disk capacity minus one) on success, 0 on error.
*/
static rt_size_t last_lba(struct rt_blk_disk *disk)
{
return rt_blk_disk_get_capacity(disk) - 1ULL;
}
rt_inline int pmbr_part_valid(gpt_mbr_record *part)
{
if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
{
return 0;
}
/* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
if (rt_le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
{
return 0;
}
return GPT_MBR_PROTECTIVE;
}
/**
* @brief This function tests the Protective MBR for validity.
*
* @param mbr the pointer to a legacy mbr structure.
* @param total_sectors the number of sectors on the device.
* @return
* 0 -> Invalid MBR
* 1 -> GPT_MBR_PROTECTIVE
* 2 -> GPT_MBR_HYBRID
*/
static int is_pmbr_valid(legacy_mbr *mbr, rt_size_t total_sectors)
{
rt_uint32_t sz = 0;
int part = 0, ret = 0; /* invalid by default */
if (!mbr || rt_le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
{
goto _done;
}
for (int i = 0; i < 4; ++i)
{
ret = pmbr_part_valid(&mbr->partition_record[i]);
if (ret == GPT_MBR_PROTECTIVE)
{
part = i;
/*
* Ok, we at least know that there's a protective MBR,
* now check if there are other partition types for
* hybrid MBR.
*/
goto _check_hybrid;
}
}
if (ret != GPT_MBR_PROTECTIVE)
{
goto _done;
}
_check_hybrid:
for (int i = 0; i < 4; i++)
{
if (mbr->partition_record[i].os_type != EFI_PMBR_OSTYPE_EFI_GPT &&
mbr->partition_record[i].os_type != 0x00)
{
ret = GPT_MBR_HYBRID;
}
}
/*
* Protective MBRs take up the lesser of the whole disk
* or 2 TiB (32bit LBA), ignoring the rest of the disk.
* Some partitioning programs, nonetheless, choose to set
* the size to the maximum 32-bit limitation, disregarding
* the disk size.
*
* Hybrid MBRs do not necessarily comply with this.
*
* Consider a bad value here to be a warning to support dd'ing
* an image from a smaller disk to a larger disk.
*/
if (ret == GPT_MBR_PROTECTIVE)
{
sz = rt_le32_to_cpu(mbr->partition_record[part].size_in_lba);
if (sz != (rt_uint32_t)total_sectors - 1 && sz != 0xffffffff)
{
LOG_W("GPT: mbr size in lba (%u) different than whole disk (%u)",
sz, rt_min_t(rt_uint32_t, total_sectors - 1, 0xffffffff));
}
}
_done:
return ret;
}
/**
* @brief This function reads bytes from the disk, starting at the given LBA.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the partition table.
* @param buffer the destination buffer.
* @param count the bytes to read.
* @return number of bytes read on success, 0 on error.
*/
static rt_size_t read_lba(struct rt_blk_disk *disk,
rt_uint64_t lba, rt_uint8_t *buffer, rt_size_t count)
{
rt_size_t totalreadcount = 0;
if (!buffer || lba > last_lba(disk))
{
return 0;
}
for (rt_uint64_t n = lba; count; ++n)
{
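/* note: the copy below assumes 512-byte sectors when advancing through the destination buffer */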
int copied = 512;
disk->ops->read(disk, n, buffer, 1);
if (copied > count)
{
copied = count;
}
buffer += copied;
totalreadcount += copied;
count -= copied;
}
return totalreadcount;
}
/**
* @brief This function reads partition entries from disk.
*
* @param disk the blk of disk.
* @param gpt the GPT header
* @return ptes on success, null on error.
*/
static gpt_entry *alloc_read_gpt_entries(struct rt_blk_disk *disk,
gpt_header *gpt)
{
rt_size_t count;
gpt_entry *pte;
rt_uint64_t entry_lba;
if (!gpt)
{
return RT_NULL;
}
count = (rt_size_t)rt_le32_to_cpu(gpt->num_partition_entries) *
rt_le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
{
return RT_NULL;
}
pte = rt_malloc(count);
if (!pte)
{
return RT_NULL;
}
entry_lba = rt_le64_to_cpu(gpt->partition_entry_lba);
if (read_lba(disk, entry_lba, (rt_uint8_t *)pte, count) < count)
{
rt_free(pte);
pte = RT_NULL;
return RT_NULL;
}
/* Remember to free pte when done */
return pte;
}
/**
* @brief This function allocates a GPT header and reads it from the disk.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the partition table
* @return GPT header on success, null on error.
*/
static gpt_header *alloc_read_gpt_header(struct rt_blk_disk *disk, rt_uint64_t lba)
{
gpt_header *gpt;
rt_uint32_t ssz = rt_blk_disk_get_logical_block_size(disk);
gpt = rt_malloc(ssz);
if (!gpt)
{
return RT_NULL;
}
if (read_lba(disk, lba, (rt_uint8_t *)gpt, ssz) < ssz)
{
rt_free(gpt);
gpt = RT_NULL;
return RT_NULL;
}
/* Remember to free gpt when finished with it */
return gpt;
}
/**
* @brief This function tests one GPT header and PTEs for validity.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the GPT header to test.
* @param gpt the GPT header ptr, filled on return.
* @param ptes the PTEs ptr, filled on return.
* @returns true if valid, false on error.
* If valid, returns pointers to newly allocated GPT header and PTEs.
*/
static rt_bool_t is_gpt_valid(struct rt_blk_disk *disk,
rt_uint64_t lba, gpt_header **gpt, gpt_entry **ptes)
{
rt_uint32_t crc, origcrc;
rt_uint64_t lastlba, pt_size;
rt_ssize_t logical_block_size;
if (!ptes)
{
return RT_FALSE;
}
if (!(*gpt = alloc_read_gpt_header(disk, lba)))
{
return RT_FALSE;
}
/* Check the GUID Partition Table signature */
if (rt_le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE)
{
LOG_D("%s: GUID Partition Table Header signature is wrong: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->signature),
(rt_uint64_t)GPT_HEADER_SIGNATURE);
goto _fail;
}
/* Check the GUID Partition Table header size is too big */
logical_block_size = rt_blk_disk_get_logical_block_size(disk);
if (rt_le32_to_cpu((*gpt)->header_size) > logical_block_size)
{
LOG_D("%s: GUID Partition Table Header size is too large: %u > %u",
to_disk_name(disk),
rt_le32_to_cpu((*gpt)->header_size),
logical_block_size);
goto _fail;
}
/* Check the GUID Partition Table header size is too small */
if (rt_le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header))
{
LOG_D("%s: GUID Partition Table Header size is too small: %u < %u",
to_disk_name(disk),
rt_le32_to_cpu((*gpt)->header_size),
sizeof(gpt_header));
goto _fail;
}
/* Check the GUID Partition Table CRC */
origcrc = rt_le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
crc = efi_crc32((const rt_uint8_t *)(*gpt), rt_le32_to_cpu((*gpt)->header_size));
if (crc != origcrc)
{
LOG_D("%s: GUID Partition Table Header CRC is wrong: %x != %x",
to_disk_name(disk), crc, origcrc);
goto _fail;
}
(*gpt)->header_crc32 = rt_cpu_to_le32(origcrc);
/*
* Check that the start_lba entry points to the LBA that contains
* the GUID Partition Table
*/
if (rt_le64_to_cpu((*gpt)->start_lba) != lba)
{
LOG_D("%s: GPT start_lba incorrect: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->start_lba),
(rt_uint64_t)lba);
goto _fail;
}
/* Check the first_usable_lba and last_usable_lba are within the disk */
lastlba = last_lba(disk);
if (rt_le64_to_cpu((*gpt)->first_usable_lba) > lastlba)
{
LOG_D("%s: GPT: first_usable_lba incorrect: %lld > %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->first_usable_lba),
(rt_uint64_t)lastlba);
goto _fail;
}
if (rt_le64_to_cpu((*gpt)->last_usable_lba) > lastlba)
{
LOG_D("%s: GPT: last_usable_lba incorrect: %lld > %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->last_usable_lba),
(rt_uint64_t)lastlba);
goto _fail;
}
if (rt_le64_to_cpu((*gpt)->last_usable_lba) < rt_le64_to_cpu((*gpt)->first_usable_lba))
{
LOG_D("%s: GPT: last_usable_lba incorrect: %lld < %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->last_usable_lba),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->first_usable_lba));
goto _fail;
}
/* Check that sizeof_partition_entry has the correct value */
if (rt_le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry))
{
LOG_D("%s: GUID Partition Entry Size check failed", to_disk_name(disk));
goto _fail;
}
/* Sanity check partition table size */
pt_size = (rt_uint64_t)rt_le32_to_cpu((*gpt)->num_partition_entries) *
rt_le32_to_cpu((*gpt)->sizeof_partition_entry);
if (!(*ptes = alloc_read_gpt_entries(disk, *gpt)))
{
goto _fail;
}
/* Check the GUID Partition Entry Array CRC */
crc = efi_crc32((const rt_uint8_t *)(*ptes), pt_size);
if (crc != rt_le32_to_cpu((*gpt)->partition_entry_array_crc32))
{
LOG_D("%s: GUID Partition Entry Array CRC check failed", to_disk_name(disk));
goto _fail_ptes;
}
/* We're done, all's well */
return RT_TRUE;
_fail_ptes:
rt_free(*ptes);
*ptes = RT_NULL;
_fail:
rt_free(*gpt);
*gpt = RT_NULL;
return RT_FALSE;
}
/**
* @brief This function tests one PTE for validity.
*
* @param pte the pte to check.
* @param lastlba the last lba of the disk.
* @return valid boolean of pte.
*/
rt_inline rt_bool_t is_pte_valid(const gpt_entry *pte, const rt_size_t lastlba)
{
if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
rt_le64_to_cpu(pte->starting_lba) > lastlba ||
rt_le64_to_cpu(pte->ending_lba) > lastlba)
{
return RT_FALSE;
}
return RT_TRUE;
}
/**
* @brief This function compares the primary and alternate GPT headers and logs any mismatch.
*
* @param disk the blk of disk.
* @param pgpt the primary GPT header.
* @param agpt the alternate GPT header.
* @param lastlba the last LBA number.
*/
static void compare_gpts(struct rt_blk_disk *disk,
gpt_header *pgpt, gpt_header *agpt, rt_uint64_t lastlba)
{
int error_found = 0;
if (!pgpt || !agpt)
{
return;
}
if (rt_le64_to_cpu(pgpt->start_lba) != rt_le64_to_cpu(agpt->alternate_lba))
{
LOG_W("%s: GPT:Primary header LBA(%lld) != Alt(%lld), header alternate_lba",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->start_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->alternate_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->alternate_lba) != rt_le64_to_cpu(agpt->start_lba))
{
LOG_W("%s: GPT:Primary header alternate_lba(%lld) != Alt(%lld), header start_lba",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->alternate_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->start_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->first_usable_lba) != rt_le64_to_cpu(agpt->first_usable_lba))
{
LOG_W("%s: GPT:first_usable_lbas don't match %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->first_usable_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->first_usable_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->last_usable_lba) != rt_le64_to_cpu(agpt->last_usable_lba))
{
LOG_W("%s: GPT:last_usable_lbas don't match %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->last_usable_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->last_usable_lba));
++error_found;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid))
{
LOG_W("%s: GPT:disk_guids don't match", to_disk_name(disk));
++error_found;
}
if (rt_le32_to_cpu(pgpt->num_partition_entries) !=
rt_le32_to_cpu(agpt->num_partition_entries))
{
LOG_W("%s: GPT:num_partition_entries don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->num_partition_entries),
rt_le32_to_cpu(agpt->num_partition_entries));
++error_found;
}
if (rt_le32_to_cpu(pgpt->sizeof_partition_entry) !=
rt_le32_to_cpu(agpt->sizeof_partition_entry))
{
LOG_W("%s: GPT:sizeof_partition_entry values don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->sizeof_partition_entry),
rt_le32_to_cpu(agpt->sizeof_partition_entry));
++error_found;
}
if (rt_le32_to_cpu(pgpt->partition_entry_array_crc32) !=
rt_le32_to_cpu(agpt->partition_entry_array_crc32))
{
LOG_W("%s: GPT:partition_entry_array_crc32 values don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->partition_entry_array_crc32),
rt_le32_to_cpu(agpt->partition_entry_array_crc32));
++error_found;
}
if (rt_le64_to_cpu(pgpt->alternate_lba) != lastlba)
{
LOG_W("%s: GPT:Primary header thinks Alt. header is not at the end of the disk: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->alternate_lba),
(rt_uint64_t)lastlba);
++error_found;
}
if (rt_le64_to_cpu(agpt->start_lba) != lastlba)
{
LOG_W("%s: GPT:Alternate GPT header not at the end of the disk: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(agpt->start_lba),
(rt_uint64_t)lastlba);
++error_found;
}
if (error_found)
{
LOG_W("GPT: Use GNU Parted to correct GPT errors");
}
}
/**
* @brief This function searches the disk for valid GPT headers and PTEs.
*
* @param disk the disk parsed partitions.
* @param gpt the GPT header ptr, filled on return.
* @param ptes the PTEs ptr, filled on return.
* @return 1 if valid, 0 on error.
* If valid, returns pointers to newly allocated GPT header and PTEs.
* Validity depends on PMBR being valid (or being overridden by the
* 'gpt' kernel command line option) and finding either the Primary
* GPT header and PTEs valid, or the Alternate GPT header and PTEs
* valid. If the Primary GPT header is not valid, the Alternate GPT header
* is not checked unless the 'gpt' kernel command line option is passed.
* This protects against devices which misreport their size, and forces
* the user to decide to use the Alternate GPT.
*/
static rt_bool_t find_valid_gpt(struct rt_blk_disk *disk,
gpt_header **gpt, gpt_entry **ptes)
{
int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
gpt_header *pgpt = RT_NULL, *agpt = RT_NULL;
gpt_entry *pptes = RT_NULL, *aptes = RT_NULL;
legacy_mbr *legacymbr;
rt_size_t total_sectors = rt_blk_disk_get_capacity(disk);
rt_size_t lastlba;
if (!ptes)
{
return RT_FALSE;
}
lastlba = last_lba(disk);
if (!force_gpt)
{
/* This will be added to the EFI Spec. per Intel after v1.02. */
legacymbr = rt_malloc(sizeof(*legacymbr));
if (!legacymbr)
{
return RT_FALSE;
}
read_lba(disk, 0, (rt_uint8_t *)legacymbr, sizeof(*legacymbr));
good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
rt_free(legacymbr);
if (!good_pmbr)
{
return RT_FALSE;
}
LOG_D("%s: Device has a %s MBR", to_disk_name(disk),
good_pmbr == GPT_MBR_PROTECTIVE ? "protective" : "hybrid");
}
good_pgpt = is_gpt_valid(disk, GPT_PRIMARY_PARTITION_TABLE_LBA, &pgpt, &pptes);
if (good_pgpt)
{
good_agpt = is_gpt_valid(disk, rt_le64_to_cpu(pgpt->alternate_lba), &agpt, &aptes);
}
if (!good_agpt && force_gpt)
{
good_agpt = is_gpt_valid(disk, lastlba, &agpt, &aptes);
}
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
{
goto _fail;
}
compare_gpts(disk, pgpt, agpt, lastlba);
/* The good cases */
if (good_pgpt)
{
*gpt = pgpt;
*ptes = pptes;
rt_free(agpt);
rt_free(aptes);
if (!good_agpt)
{
LOG_D("%s: Alternate GPT is invalid, using primary GPT", to_disk_name(disk));
}
return RT_TRUE;
}
else if (good_agpt)
{
*gpt = agpt;
*ptes = aptes;
rt_free(pgpt);
rt_free(pptes);
LOG_D("%s: Primary GPT is invalid, using alternate GPT", to_disk_name(disk));
return RT_TRUE;
}
_fail:
rt_free(pgpt);
rt_free(agpt);
rt_free(pptes);
rt_free(aptes);
*gpt = RT_NULL;
*ptes = RT_NULL;
return RT_FALSE;
}
rt_err_t efi_partition(struct rt_blk_disk *disk)
{
rt_uint32_t entries_nr;
gpt_header *gpt = RT_NULL;
gpt_entry *ptes = RT_NULL;
if (!find_valid_gpt(disk, &gpt, &ptes) || !gpt || !ptes)
{
rt_free(gpt);
rt_free(ptes);
return -RT_EINVAL;
}
entries_nr = rt_le32_to_cpu(gpt->num_partition_entries);
for (int i = 0; i < entries_nr && i < disk->max_partitions; ++i)
{
rt_uint64_t start = rt_le64_to_cpu(ptes[i].starting_lba);
rt_uint64_t size = rt_le64_to_cpu(ptes[i].ending_lba) -
rt_le64_to_cpu(ptes[i].starting_lba) + 1ULL;
if (!is_pte_valid(&ptes[i], last_lba(disk)))
{
continue;
}
if (blk_put_partition(disk, "gpt", start, size, i) == -RT_ENOMEM)
{
break;
}
}
rt_free(gpt);
rt_free(ptes);
return RT_EOK;
}

View File

@@ -0,0 +1,141 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#ifndef __PARTITIONS_EFI_H__
#define __PARTITIONS_EFI_H__
#include "../blk_partition.h"
#include <drivers/misc.h>
#include <drivers/byteorder.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xef
#define EFI_PMBR_OSTYPE_EFI_GPT 0xee
#define GPT_MBR_PROTECTIVE 1
#define GPT_MBR_HYBRID 2
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
#ifndef __UUID_H__
#define UUID_SIZE 16
typedef struct
{
rt_uint8_t b[UUID_SIZE];
} guid_t;
#endif /* __UUID_H__ */
#ifndef __EFI_H__
typedef guid_t efi_guid_t rt_align(4);
#define EFI_GUID(a, b, c, d...) (efi_guid_t) \
{{ \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, \
d \
}}
#define NULL_GUID \
EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
rt_inline int efi_guidcmp(efi_guid_t left, efi_guid_t right)
{
return rt_memcmp(&left, &right, sizeof (efi_guid_t));
}
#endif /* __EFI_H__ */
#define PARTITION_SYSTEM_GUID \
EFI_GUID(0xc12a7328, 0xf81f, 0x11d2, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b)
#define LEGACY_MBR_PARTITION_GUID \
EFI_GUID(0x024dee41, 0x33e7, 0x11d3, 0x9d, 0x69, 0x00, 0x08, 0xc7, 0x81, 0xf3, 0x9f)
#define PARTITION_MSFT_RESERVED_GUID \
EFI_GUID(0xe3c9e316, 0x0b5c, 0x4db8, 0x81, 0x7d, 0xf9, 0x2d, 0xf0, 0x02, 0x15, 0xae)
#define PARTITION_BASIC_DATA_GUID \
EFI_GUID(0xebd0a0a2, 0xb9e5, 0x4433, 0x87, 0xc0, 0x68, 0xb6, 0xb7, 0x26, 0x99, 0xc7)
rt_packed(struct _gpt_header
{
rt_le64_t signature;
rt_le32_t revision;
rt_le32_t header_size;
rt_le32_t header_crc32;
rt_le32_t reserved1;
rt_le64_t start_lba;
rt_le64_t alternate_lba;
rt_le64_t first_usable_lba;
rt_le64_t last_usable_lba;
efi_guid_t disk_guid;
rt_le64_t partition_entry_lba;
rt_le32_t num_partition_entries;
rt_le32_t sizeof_partition_entry;
rt_le32_t partition_entry_array_crc32;
/*
* The rest of the logical block is reserved by UEFI and must be zero.
* EFI standard handles this by:
*
* uint8_t reserved2[BlockSize - 92];
*/
});
typedef struct _gpt_header gpt_header;
rt_packed(struct _gpt_entry_attributes
{
rt_uint64_t required_to_function:1;
rt_uint64_t reserved:47;
rt_uint64_t type_guid_specific:16;
});
typedef struct _gpt_entry_attributes gpt_entry_attributes;
rt_packed(struct _gpt_entry
{
efi_guid_t partition_type_guid;
efi_guid_t unique_partition_guid;
rt_le64_t starting_lba;
rt_le64_t ending_lba;
gpt_entry_attributes attributes;
rt_le16_t partition_name[72/sizeof(rt_le16_t)];
});
typedef struct _gpt_entry gpt_entry;
rt_packed(struct _gpt_mbr_record
{
rt_uint8_t boot_indicator; /* unused by EFI, set to 0x80 for bootable */
rt_uint8_t start_head; /* unused by EFI, pt start in CHS */
rt_uint8_t start_sector; /* unused by EFI, pt start in CHS */
rt_uint8_t start_track;
rt_uint8_t os_type; /* EFI and legacy non-EFI OS types */
rt_uint8_t end_head; /* unused by EFI, pt end in CHS */
rt_uint8_t end_sector; /* unused by EFI, pt end in CHS */
rt_uint8_t end_track; /* unused by EFI, pt end in CHS */
rt_le32_t starting_lba; /* used by EFI - start addr of the on disk pt */
rt_le32_t size_in_lba; /* used by EFI - size of pt in LBA */
});
typedef struct _gpt_mbr_record gpt_mbr_record;
rt_packed(struct _legacy_mbr
{
rt_uint8_t boot_code[440];
rt_le32_t unique_mbr_signature;
rt_le16_t unknown;
gpt_mbr_record partition_record[4];
rt_le16_t signature;
});
typedef struct _legacy_mbr legacy_mbr;
#endif /* __PARTITIONS_EFI_H__ */

View File

@@ -0,0 +1,974 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-05-14 aubrcool@qq.com first version
* 2015-07-06 Bernard code cleanup and remove RT_CAN_USING_LED;
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define CAN_LOCK(can) rt_mutex_take(&(can->lock), RT_WAITING_FOREVER)
#define CAN_UNLOCK(can) rt_mutex_release(&(can->lock))
static rt_err_t rt_can_init(struct rt_device *dev)
{
rt_err_t result = RT_EOK;
struct rt_can_device *can;
RT_ASSERT(dev != RT_NULL);
can = (struct rt_can_device *)dev;
/* initialize rx/tx */
can->can_rx = RT_NULL;
can->can_tx = RT_NULL;
#ifdef RT_CAN_USING_HDR
can->hdr = RT_NULL;
#endif
/* apply configuration */
if (can->ops->configure)
result = can->ops->configure(can, &can->config);
else
result = -RT_ENOSYS;
return result;
}
/*
* can interrupt routines
*/
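/* note: in the helpers below, 'msgs' and the return value are byte counts, consumed in sizeof(struct rt_can_msg) steps */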
rt_inline rt_ssize_t _can_int_rx(struct rt_can_device *can, struct rt_can_msg *data, rt_ssize_t msgs)
{
rt_ssize_t size;
struct rt_can_rx_fifo *rx_fifo;
RT_ASSERT(can != RT_NULL);
size = msgs;
rx_fifo = (struct rt_can_rx_fifo *) can->can_rx;
RT_ASSERT(rx_fifo != RT_NULL);
/* read from software FIFO */
while (msgs / sizeof(struct rt_can_msg) > 0)
{
rt_base_t level;
#ifdef RT_CAN_USING_HDR
rt_int8_t hdr;
#endif /*RT_CAN_USING_HDR*/
struct rt_can_msg_list *listmsg = RT_NULL;
/* disable interrupt */
level = rt_hw_interrupt_disable();
#ifdef RT_CAN_USING_HDR
hdr = data->hdr_index;
if (hdr >= 0 && can->hdr && hdr < can->config.maxhdr && !rt_list_isempty(&can->hdr[hdr].list))
{
listmsg = rt_list_entry(can->hdr[hdr].list.next, struct rt_can_msg_list, hdrlist);
rt_list_remove(&listmsg->list);
rt_list_remove(&listmsg->hdrlist);
if (can->hdr[hdr].msgs)
{
can->hdr[hdr].msgs--;
}
listmsg->owner = RT_NULL;
}
else if (hdr == -1)
#endif /*RT_CAN_USING_HDR*/
{
if (!rt_list_isempty(&rx_fifo->uselist))
{
listmsg = rt_list_entry(rx_fifo->uselist.next, struct rt_can_msg_list, list);
rt_list_remove(&listmsg->list);
#ifdef RT_CAN_USING_HDR
rt_list_remove(&listmsg->hdrlist);
if (listmsg->owner != RT_NULL && listmsg->owner->msgs)
{
listmsg->owner->msgs--;
}
listmsg->owner = RT_NULL;
#endif /*RT_CAN_USING_HDR*/
}
else
{
/* no data, enable interrupt and break out */
rt_hw_interrupt_enable(level);
break;
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
if (listmsg != RT_NULL)
{
rt_memcpy(data, &listmsg->data, sizeof(struct rt_can_msg));
level = rt_hw_interrupt_disable();
rt_list_insert_before(&rx_fifo->freelist, &listmsg->list);
rx_fifo->freenumbers++;
RT_ASSERT(rx_fifo->freenumbers <= can->config.msgboxsz);
rt_hw_interrupt_enable(level);
listmsg = RT_NULL;
}
else
{
break;
}
data ++;
msgs -= sizeof(struct rt_can_msg);
}
return (size - msgs);
}
rt_inline int _can_int_tx(struct rt_can_device *can, const struct rt_can_msg *data, int msgs)
{
int size;
struct rt_can_tx_fifo *tx_fifo;
RT_ASSERT(can != RT_NULL);
size = msgs;
tx_fifo = (struct rt_can_tx_fifo *) can->can_tx;
RT_ASSERT(tx_fifo != RT_NULL);
while (msgs)
{
rt_base_t level;
rt_uint32_t no;
rt_uint32_t result;
struct rt_can_sndbxinx_list *tx_tosnd = RT_NULL;
rt_sem_take(&(tx_fifo->sem), RT_WAITING_FOREVER);
level = rt_hw_interrupt_disable();
tx_tosnd = rt_list_entry(tx_fifo->freelist.next, struct rt_can_sndbxinx_list, list);
RT_ASSERT(tx_tosnd != RT_NULL);
rt_list_remove(&tx_tosnd->list);
rt_hw_interrupt_enable(level);
no = ((rt_ubase_t)tx_tosnd - (rt_ubase_t)tx_fifo->buffer) / sizeof(struct rt_can_sndbxinx_list);
tx_tosnd->result = RT_CAN_SND_RESULT_WAIT;
rt_completion_init(&tx_tosnd->completion);
if (can->ops->sendmsg(can, data, no) != RT_EOK)
{
/* send failed. */
level = rt_hw_interrupt_disable();
rt_list_insert_before(&tx_fifo->freelist, &tx_tosnd->list);
rt_hw_interrupt_enable(level);
rt_sem_release(&(tx_fifo->sem));
goto err_ret;
}
can->status.sndchange = 1;
rt_completion_wait(&(tx_tosnd->completion), RT_WAITING_FOREVER);
level = rt_hw_interrupt_disable();
result = tx_tosnd->result;
if (!rt_list_isempty(&tx_tosnd->list))
{
rt_list_remove(&tx_tosnd->list);
}
rt_list_insert_before(&tx_fifo->freelist, &tx_tosnd->list);
rt_hw_interrupt_enable(level);
rt_sem_release(&(tx_fifo->sem));
if (result == RT_CAN_SND_RESULT_OK)
{
level = rt_hw_interrupt_disable();
can->status.sndpkg++;
rt_hw_interrupt_enable(level);
data ++;
msgs -= sizeof(struct rt_can_msg);
if (!msgs) break;
}
else
{
err_ret:
level = rt_hw_interrupt_disable();
can->status.dropedsndpkg++;
rt_hw_interrupt_enable(level);
break;
}
}
return (size - msgs);
}
rt_inline int _can_int_tx_priv(struct rt_can_device *can, const struct rt_can_msg *data, int msgs)
{
int size;
rt_base_t level;
rt_uint32_t no, result;
struct rt_can_tx_fifo *tx_fifo;
RT_ASSERT(can != RT_NULL);
size = msgs;
tx_fifo = (struct rt_can_tx_fifo *) can->can_tx;
RT_ASSERT(tx_fifo != RT_NULL);
while (msgs)
{
no = data->priv;
if (no >= can->config.sndboxnumber)
{
break;
}
level = rt_hw_interrupt_disable();
if ((tx_fifo->buffer[no].result != RT_CAN_SND_RESULT_OK))
{
rt_hw_interrupt_enable(level);
rt_completion_wait(&(tx_fifo->buffer[no].completion), RT_WAITING_FOREVER);
continue;
}
tx_fifo->buffer[no].result = RT_CAN_SND_RESULT_WAIT;
rt_hw_interrupt_enable(level);
if (can->ops->sendmsg(can, data, no) != RT_EOK)
{
continue;
}
can->status.sndchange = 1;
rt_completion_wait(&(tx_fifo->buffer[no].completion), RT_WAITING_FOREVER);
result = tx_fifo->buffer[no].result;
if (result == RT_CAN_SND_RESULT_OK)
{
level = rt_hw_interrupt_disable();
can->status.sndpkg++;
rt_hw_interrupt_enable(level);
data ++;
msgs -= sizeof(struct rt_can_msg);
if (!msgs) break;
}
else
{
level = rt_hw_interrupt_disable();
can->status.dropedsndpkg++;
rt_hw_interrupt_enable(level);
break;
}
}
return (size - msgs);
}
static rt_err_t rt_can_open(struct rt_device *dev, rt_uint16_t oflag)
{
struct rt_can_device *can;
char tmpname[16];
RT_ASSERT(dev != RT_NULL);
can = (struct rt_can_device *)dev;
CAN_LOCK(can);
/* get open flags */
dev->open_flag = oflag & 0xff;
if (can->can_rx == RT_NULL)
{
if (oflag & RT_DEVICE_FLAG_INT_RX)
{
int i = 0;
struct rt_can_rx_fifo *rx_fifo;
rx_fifo = (struct rt_can_rx_fifo *) rt_malloc(sizeof(struct rt_can_rx_fifo) +
can->config.msgboxsz * sizeof(struct rt_can_msg_list));
RT_ASSERT(rx_fifo != RT_NULL);
rx_fifo->buffer = (struct rt_can_msg_list *)(rx_fifo + 1);
rt_memset(rx_fifo->buffer, 0, can->config.msgboxsz * sizeof(struct rt_can_msg_list));
rt_list_init(&rx_fifo->freelist);
rt_list_init(&rx_fifo->uselist);
rx_fifo->freenumbers = can->config.msgboxsz;
for (i = 0; i < can->config.msgboxsz; i++)
{
rt_list_insert_before(&rx_fifo->freelist, &rx_fifo->buffer[i].list);
#ifdef RT_CAN_USING_HDR
rt_list_init(&rx_fifo->buffer[i].hdrlist);
rx_fifo->buffer[i].owner = RT_NULL;
#endif
}
can->can_rx = rx_fifo;
dev->open_flag |= RT_DEVICE_FLAG_INT_RX;
/* open can rx interrupt */
can->ops->control(can, RT_DEVICE_CTRL_SET_INT, (void *)RT_DEVICE_FLAG_INT_RX);
}
}
if (can->can_tx == RT_NULL)
{
if (oflag & RT_DEVICE_FLAG_INT_TX)
{
int i = 0;
struct rt_can_tx_fifo *tx_fifo;
tx_fifo = (struct rt_can_tx_fifo *) rt_malloc(sizeof(struct rt_can_tx_fifo) +
can->config.sndboxnumber * sizeof(struct rt_can_sndbxinx_list));
RT_ASSERT(tx_fifo != RT_NULL);
tx_fifo->buffer = (struct rt_can_sndbxinx_list *)(tx_fifo + 1);
rt_memset(tx_fifo->buffer, 0,
can->config.sndboxnumber * sizeof(struct rt_can_sndbxinx_list));
rt_list_init(&tx_fifo->freelist);
for (i = 0; i < can->config.sndboxnumber; i++)
{
rt_list_insert_before(&tx_fifo->freelist, &tx_fifo->buffer[i].list);
rt_completion_init(&(tx_fifo->buffer[i].completion));
tx_fifo->buffer[i].result = RT_CAN_SND_RESULT_OK;
}
rt_sprintf(tmpname, "%stl", dev->parent.name);
rt_sem_init(&(tx_fifo->sem), tmpname, can->config.sndboxnumber, RT_IPC_FLAG_FIFO);
can->can_tx = tx_fifo;
dev->open_flag |= RT_DEVICE_FLAG_INT_TX;
/* open can tx interrupt */
can->ops->control(can, RT_DEVICE_CTRL_SET_INT, (void *)RT_DEVICE_FLAG_INT_TX);
}
}
can->ops->control(can, RT_DEVICE_CTRL_SET_INT, (void *)RT_DEVICE_CAN_INT_ERR);
#ifdef RT_CAN_USING_HDR
if (can->hdr == RT_NULL)
{
int i = 0;
struct rt_can_hdr *phdr;
phdr = (struct rt_can_hdr *) rt_malloc(can->config.maxhdr * sizeof(struct rt_can_hdr));
RT_ASSERT(phdr != RT_NULL);
rt_memset(phdr, 0, can->config.maxhdr * sizeof(struct rt_can_hdr));
for (i = 0; i < can->config.maxhdr; i++)
{
rt_list_init(&phdr[i].list);
}
can->hdr = phdr;
}
#endif
if (!can->timerinitflag)
{
can->timerinitflag = 1;
rt_timer_start(&can->timer);
}
CAN_UNLOCK(can);
return RT_EOK;
}
static rt_err_t rt_can_close(struct rt_device *dev)
{
struct rt_can_device *can;
RT_ASSERT(dev != RT_NULL);
can = (struct rt_can_device *)dev;
CAN_LOCK(can);
/* the device is still referenced by other users, keep it open */
if (dev->ref_count > 1)
{
CAN_UNLOCK(can);
return RT_EOK;
}
if (can->timerinitflag)
{
can->timerinitflag = 0;
rt_timer_stop(&can->timer);
}
can->status_indicate.ind = RT_NULL;
can->status_indicate.args = RT_NULL;
#ifdef RT_CAN_USING_HDR
if (can->hdr != RT_NULL)
{
rt_free(can->hdr);
can->hdr = RT_NULL;
}
#endif
if (dev->open_flag & RT_DEVICE_FLAG_INT_RX)
{
struct rt_can_rx_fifo *rx_fifo;
/* clear can rx interrupt */
can->ops->control(can, RT_DEVICE_CTRL_CLR_INT, (void *)RT_DEVICE_FLAG_INT_RX);
rx_fifo = (struct rt_can_rx_fifo *)can->can_rx;
RT_ASSERT(rx_fifo != RT_NULL);
rt_free(rx_fifo);
dev->open_flag &= ~RT_DEVICE_FLAG_INT_RX;
can->can_rx = RT_NULL;
}
if (dev->open_flag & RT_DEVICE_FLAG_INT_TX)
{
struct rt_can_tx_fifo *tx_fifo;
/* clear can tx interrupt */
can->ops->control(can, RT_DEVICE_CTRL_CLR_INT, (void *)RT_DEVICE_FLAG_INT_TX);
tx_fifo = (struct rt_can_tx_fifo *)can->can_tx;
RT_ASSERT(tx_fifo != RT_NULL);
rt_sem_detach(&(tx_fifo->sem));
rt_free(tx_fifo);
dev->open_flag &= ~RT_DEVICE_FLAG_INT_TX;
can->can_tx = RT_NULL;
}
can->ops->control(can, RT_DEVICE_CTRL_CLR_INT, (void *)RT_DEVICE_CAN_INT_ERR);
can->ops->control(can, RT_CAN_CMD_START, RT_FALSE);
CAN_UNLOCK(can);
return RT_EOK;
}
static rt_ssize_t rt_can_read(struct rt_device *dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
struct rt_can_device *can;
RT_ASSERT(dev != RT_NULL);
if (size == 0) return 0;
can = (struct rt_can_device *)dev;
if ((dev->open_flag & RT_DEVICE_FLAG_INT_RX) && (dev->ref_count > 0))
{
return _can_int_rx(can, buffer, size);
}
return 0;
}
static rt_ssize_t rt_can_write(struct rt_device *dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
struct rt_can_device *can;
RT_ASSERT(dev != RT_NULL);
if (size == 0) return 0;
can = (struct rt_can_device *)dev;
if ((dev->open_flag & RT_DEVICE_FLAG_INT_TX) && (dev->ref_count > 0))
{
if (can->config.privmode)
{
return _can_int_tx_priv(can, buffer, size);
}
else
{
return _can_int_tx(can, buffer, size);
}
}
return 0;
}
static rt_err_t rt_can_control(struct rt_device *dev,
int cmd,
void *args)
{
struct rt_can_device *can;
rt_err_t res;
res = RT_EOK;
RT_ASSERT(dev != RT_NULL);
can = (struct rt_can_device *)dev;
switch (cmd)
{
case RT_DEVICE_CTRL_SUSPEND:
/* suspend device */
dev->flag |= RT_DEVICE_FLAG_SUSPENDED;
break;
case RT_DEVICE_CTRL_RESUME:
/* resume device */
dev->flag &= ~RT_DEVICE_FLAG_SUSPENDED;
break;
case RT_DEVICE_CTRL_CONFIG:
/* configure device */
res = can->ops->configure(can, (struct can_configure *)args);
break;
case RT_CAN_CMD_SET_PRIV:
/* configure device */
if ((rt_uint32_t)(rt_ubase_t)args != can->config.privmode)
{
int i;
rt_base_t level;
struct rt_can_tx_fifo *tx_fifo;
res = can->ops->control(can, cmd, args);
if (res != RT_EOK) return res;
tx_fifo = (struct rt_can_tx_fifo *) can->can_tx;
if (can->config.privmode)
{
for (i = 0; i < can->config.sndboxnumber; i++)
{
level = rt_hw_interrupt_disable();
if(rt_list_isempty(&tx_fifo->buffer[i].list))
{
rt_sem_release(&(tx_fifo->sem));
}
else
{
rt_list_remove(&tx_fifo->buffer[i].list);
}
rt_hw_interrupt_enable(level);
}
}
else
{
for (i = 0; i < can->config.sndboxnumber; i++)
{
level = rt_hw_interrupt_disable();
if (tx_fifo->buffer[i].result == RT_CAN_SND_RESULT_OK)
{
rt_list_insert_before(&tx_fifo->freelist, &tx_fifo->buffer[i].list);
}
rt_hw_interrupt_enable(level);
}
}
}
break;
case RT_CAN_CMD_SET_STATUS_IND:
can->status_indicate.ind = ((rt_can_status_ind_type_t)args)->ind;
can->status_indicate.args = ((rt_can_status_ind_type_t)args)->args;
break;
#ifdef RT_CAN_USING_HDR
case RT_CAN_CMD_SET_FILTER:
res = can->ops->control(can, cmd, args);
if (res != RT_EOK || can->hdr == RT_NULL)
{
return res;
}
struct rt_can_filter_config *pfilter;
struct rt_can_filter_item *pitem;
rt_uint32_t count;
rt_base_t level;
pfilter = (struct rt_can_filter_config *)args;
RT_ASSERT(pfilter);
count = pfilter->count;
pitem = pfilter->items;
if (pfilter->actived)
{
while (count)
{
if (pitem->hdr_bank >= can->config.maxhdr || pitem->hdr_bank < 0)
{
count--;
pitem++;
continue;
}
level = rt_hw_interrupt_disable();
if (!can->hdr[pitem->hdr_bank].connected)
{
rt_hw_interrupt_enable(level);
rt_memcpy(&can->hdr[pitem->hdr_bank].filter, pitem,
sizeof(struct rt_can_filter_item));
level = rt_hw_interrupt_disable();
can->hdr[pitem->hdr_bank].connected = 1;
can->hdr[pitem->hdr_bank].msgs = 0;
rt_list_init(&can->hdr[pitem->hdr_bank].list);
}
rt_hw_interrupt_enable(level);
count--;
pitem++;
}
}
else
{
while (count)
{
if (pitem->hdr_bank >= can->config.maxhdr || pitem->hdr_bank < 0)
{
count--;
pitem++;
continue;
}
level = rt_hw_interrupt_disable();
if (can->hdr[pitem->hdr_bank].connected)
{
can->hdr[pitem->hdr_bank].connected = 0;
can->hdr[pitem->hdr_bank].msgs = 0;
if (!rt_list_isempty(&can->hdr[pitem->hdr_bank].list))
{
rt_list_remove(can->hdr[pitem->hdr_bank].list.next);
}
rt_hw_interrupt_enable(level);
rt_memset(&can->hdr[pitem->hdr_bank].filter, 0,
sizeof(struct rt_can_filter_item));
}
else
{
rt_hw_interrupt_enable(level);
}
count--;
pitem++;
}
}
break;
#endif /*RT_CAN_USING_HDR*/
#ifdef RT_CAN_USING_BUS_HOOK
case RT_CAN_CMD_SET_BUS_HOOK:
can->bus_hook = (rt_can_bus_hook) args;
break;
#endif /*RT_CAN_USING_BUS_HOOK*/
default :
/* control device */
if (can->ops->control != RT_NULL)
{
res = can->ops->control(can, cmd, args);
}
else
{
res = -RT_ENOSYS;
}
break;
}
return res;
}
/*
* can timer
*/
static void cantimeout(void *arg)
{
rt_can_t can;
can = (rt_can_t)arg;
RT_ASSERT(can);
rt_device_control((rt_device_t)can, RT_CAN_CMD_GET_STATUS, (void *)&can->status);
if (can->status_indicate.ind != RT_NULL)
{
can->status_indicate.ind(can, can->status_indicate.args);
}
#ifdef RT_CAN_USING_BUS_HOOK
if(can->bus_hook)
{
can->bus_hook(can);
}
#endif /*RT_CAN_USING_BUS_HOOK*/
if (can->timerinitflag == 1)
{
can->timerinitflag = 0xFF;
}
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops can_device_ops =
{
rt_can_init,
rt_can_open,
rt_can_close,
rt_can_read,
rt_can_write,
rt_can_control
};
#endif
/*
* can register
*/
rt_err_t rt_hw_can_register(struct rt_can_device *can,
const char *name,
const struct rt_can_ops *ops,
void *data)
{
struct rt_device *device;
RT_ASSERT(can != RT_NULL);
device = &(can->parent);
device->type = RT_Device_Class_CAN;
device->rx_indicate = RT_NULL;
device->tx_complete = RT_NULL;
#ifdef RT_CAN_USING_HDR
can->hdr = RT_NULL;
#endif
can->can_rx = RT_NULL;
can->can_tx = RT_NULL;
rt_mutex_init(&(can->lock), "can", RT_IPC_FLAG_PRIO);
#ifdef RT_CAN_USING_BUS_HOOK
can->bus_hook = RT_NULL;
#endif /*RT_CAN_USING_BUS_HOOK*/
#ifdef RT_USING_DEVICE_OPS
device->ops = &can_device_ops;
#else
device->init = rt_can_init;
device->open = rt_can_open;
device->close = rt_can_close;
device->read = rt_can_read;
device->write = rt_can_write;
device->control = rt_can_control;
#endif
can->ops = ops;
can->status_indicate.ind = RT_NULL;
can->status_indicate.args = RT_NULL;
rt_memset(&can->status, 0, sizeof(can->status));
device->user_data = data;
can->timerinitflag = 0;
rt_timer_init(&can->timer,
name,
cantimeout,
(void *)can,
can->config.ticks,
RT_TIMER_FLAG_PERIODIC);
/* register a character device */
return rt_device_register(device, name, RT_DEVICE_FLAG_RDWR);
}
/* ISR for can interrupt */
void rt_hw_can_isr(struct rt_can_device *can, int event)
{
switch (event & 0xff)
{
case RT_CAN_EVENT_RXOF_IND:
{
rt_base_t level;
level = rt_hw_interrupt_disable();
can->status.dropedrcvpkg++;
rt_hw_interrupt_enable(level);
}
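/* fall through: an RX overflow still delivers the newly received frame */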
case RT_CAN_EVENT_RX_IND:
{
struct rt_can_msg tmpmsg;
struct rt_can_rx_fifo *rx_fifo;
struct rt_can_msg_list *listmsg = RT_NULL;
#ifdef RT_CAN_USING_HDR
rt_int8_t hdr = -1;
#endif
int ch = -1;
rt_base_t level;
rt_uint32_t no;
rx_fifo = (struct rt_can_rx_fifo *)can->can_rx;
RT_ASSERT(rx_fifo != RT_NULL);
/* interrupt mode receive */
RT_ASSERT(can->parent.open_flag & RT_DEVICE_FLAG_INT_RX);
no = event >> 8;
ch = can->ops->recvmsg(can, &tmpmsg, no);
if (ch == -1) break;
/* disable interrupt */
level = rt_hw_interrupt_disable();
can->status.rcvpkg++;
can->status.rcvchange = 1;
if (!rt_list_isempty(&rx_fifo->freelist))
{
listmsg = rt_list_entry(rx_fifo->freelist.next, struct rt_can_msg_list, list);
rt_list_remove(&listmsg->list);
#ifdef RT_CAN_USING_HDR
rt_list_remove(&listmsg->hdrlist);
if (listmsg->owner != RT_NULL && listmsg->owner->msgs)
{
listmsg->owner->msgs--;
}
listmsg->owner = RT_NULL;
#endif /*RT_CAN_USING_HDR*/
RT_ASSERT(rx_fifo->freenumbers > 0);
rx_fifo->freenumbers--;
}
else if (!rt_list_isempty(&rx_fifo->uselist))
{
listmsg = rt_list_entry(rx_fifo->uselist.next, struct rt_can_msg_list, list);
can->status.dropedrcvpkg++;
rt_list_remove(&listmsg->list);
#ifdef RT_CAN_USING_HDR
rt_list_remove(&listmsg->hdrlist);
if (listmsg->owner != RT_NULL && listmsg->owner->msgs)
{
listmsg->owner->msgs--;
}
listmsg->owner = RT_NULL;
#endif
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
if (listmsg != RT_NULL)
{
rt_memcpy(&listmsg->data, &tmpmsg, sizeof(struct rt_can_msg));
level = rt_hw_interrupt_disable();
rt_list_insert_before(&rx_fifo->uselist, &listmsg->list);
#ifdef RT_CAN_USING_HDR
hdr = tmpmsg.hdr_index;
if (can->hdr != RT_NULL)
{
RT_ASSERT(hdr < can->config.maxhdr && hdr >= 0);
if (can->hdr[hdr].connected)
{
rt_list_insert_before(&can->hdr[hdr].list, &listmsg->hdrlist);
listmsg->owner = &can->hdr[hdr];
can->hdr[hdr].msgs++;
}
}
#endif
rt_hw_interrupt_enable(level);
}
/* invoke callback */
#ifdef RT_CAN_USING_HDR
if (can->hdr != RT_NULL && hdr >= 0 && can->hdr[hdr].connected && can->hdr[hdr].filter.ind)
{
rt_size_t rx_length;
RT_ASSERT(hdr < can->config.maxhdr && hdr >= 0);
level = rt_hw_interrupt_disable();
rx_length = can->hdr[hdr].msgs * sizeof(struct rt_can_msg);
rt_hw_interrupt_enable(level);
if (rx_length)
{
can->hdr[hdr].filter.ind(&can->parent, can->hdr[hdr].filter.args, hdr, rx_length);
}
}
else
#endif
{
if (can->parent.rx_indicate != RT_NULL)
{
rt_size_t rx_length;
level = rt_hw_interrupt_disable();
/* get rx length */
rx_length = rt_list_len(&rx_fifo->uselist)* sizeof(struct rt_can_msg);
rt_hw_interrupt_enable(level);
if (rx_length)
{
can->parent.rx_indicate(&can->parent, rx_length);
}
}
}
break;
}
case RT_CAN_EVENT_TX_DONE:
case RT_CAN_EVENT_TX_FAIL:
{
struct rt_can_tx_fifo *tx_fifo;
rt_uint32_t no;
no = event >> 8;
tx_fifo = (struct rt_can_tx_fifo *) can->can_tx;
RT_ASSERT(tx_fifo != RT_NULL);
if ((event & 0xff) == RT_CAN_EVENT_TX_DONE)
{
tx_fifo->buffer[no].result = RT_CAN_SND_RESULT_OK;
}
else
{
tx_fifo->buffer[no].result = RT_CAN_SND_RESULT_ERR;
}
rt_completion_done(&(tx_fifo->buffer[no].completion));
break;
}
}
}
#ifdef RT_USING_FINSH
#include <finsh.h>
int cmd_canstat(int argc, void **argv)
{
static const char *ErrCode[] =
{
"No Error!",
"Warning !",
"Passive !",
"Bus Off !"
};
if (argc >= 2)
{
struct rt_can_status status;
rt_device_t candev = rt_device_find(argv[1]);
if (!candev)
{
rt_kprintf(" Can't find can device %s\n", argv[1]);
return -1;
}
rt_kprintf(" Found can device: %s...", argv[1]);
rt_device_control(candev, RT_CAN_CMD_GET_STATUS, &status);
rt_kprintf("\n Receive...error..count: %010ld. Send.....error....count: %010ld.",
status.rcverrcnt, status.snderrcnt);
rt_kprintf("\n Bit..pad..error..count: %010ld. Format...error....count: %010ld",
status.bitpaderrcnt, status.formaterrcnt);
rt_kprintf("\n Ack.......error..count: %010ld. Bit......error....count: %010ld.",
status.ackerrcnt, status.biterrcnt);
rt_kprintf("\n CRC.......error..count: %010ld. Error.code.[%010ld]: ",
status.crcerrcnt, status.errcode);
switch (status.errcode)
{
case 0:
rt_kprintf("%s.", ErrCode[0]);
break;
case 1:
rt_kprintf("%s.", ErrCode[1]);
break;
case 2:
case 3:
rt_kprintf("%s.", ErrCode[2]);
break;
case 4:
case 5:
case 6:
case 7:
rt_kprintf("%s.", ErrCode[3]);
break;
}
rt_kprintf("\n Total.receive.packages: %010ld. Dropped.receive.packages: %010ld.",
status.rcvpkg, status.dropedrcvpkg);
rt_kprintf("\n Total..send...packages: %010ld. Dropped...send..packages: %010ld.\n",
status.sndpkg + status.dropedsndpkg, status.dropedsndpkg);
}
else
{
rt_kprintf(" Invalid Call %s\n", argv[0]);
rt_kprintf(" Please using %s cannamex .Here canname is driver name and x is candrive number.\n", argv[0]);
}
return 0;
}
MSH_CMD_EXPORT_ALIAS(cmd_canstat, canstat, show CAN device status);
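/*
 * Example (msh shell), assuming a registered CAN device named "can1":
 *
 *     msh> canstat can1
 *
 * The command looks the device up with rt_device_find() and prints the
 * error counters and error state returned by RT_CAN_CMD_GET_STATUS.
 */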
#endif

View File

@@ -3,3 +3,7 @@ menuconfig RT_USING_CLK
depends on RT_USING_DM
select RT_USING_ADT_REF
default y
if RT_USING_CLK
osource "$(SOC_DM_CLK_DIR)/Kconfig"
endif

View File

@@ -55,6 +55,10 @@ static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
clk->fw_node = fw_node;
}
else
{
clk = rt_err_ptr(-RT_ENOMEM);
}
return clk;
}
@@ -76,7 +80,7 @@ static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
{
struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);
if (clk)
if (!rt_is_err(clk))
{
clk_get(clk_np);
@@ -135,15 +139,6 @@ rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_
struct rt_clk *clk = RT_NULL;
if (clk_np)
{
clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
}
else
{
err = -RT_EINVAL;
}
if (!err && clk_np)
{
clk_np->clk = clk;
@@ -152,6 +147,12 @@ rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_
clk_np->ops = &unused_clk_ops;
}
#if RT_NAME_MAX > 0
rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX);
#else
clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME;
#endif
rt_ref_init(&clk_np->ref);
rt_list_init(&clk_np->list);
rt_list_init(&clk_np->children_nodes);
@@ -159,7 +160,16 @@ rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_
if (parent_np)
{
clk_set_parent(clk_np, parent_np);
clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
if (clk_np->clk)
{
clk_set_parent(clk_np, parent_np);
}
else
{
err = -RT_ENOMEM;
}
}
else
{
@@ -265,11 +275,16 @@ static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
clk_prepare(clk_np->clk, clk_np->parent);
}
if (clk_np->ops->prepare)
if (clk->prepare_count == 0 && clk_np->ops->prepare)
{
err = clk_np->ops->prepare(clk);
}
if (!err)
{
++clk->prepare_count;
}
return err;
}
@@ -287,10 +302,6 @@ rt_err_t rt_clk_prepare(struct rt_clk *clk)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -302,10 +313,14 @@ static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
clk_unprepare(clk_np->clk, clk_np->parent);
}
if (clk_np->ops->unprepare)
if (clk->prepare_count == 1 && clk_np->ops->unprepare)
{
clk_np->ops->unprepare(clk);
}
if (clk->prepare_count)
{
--clk->prepare_count;
}
}
rt_err_t rt_clk_unprepare(struct rt_clk *clk)
@@ -322,10 +337,6 @@ rt_err_t rt_clk_unprepare(struct rt_clk *clk)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -339,11 +350,16 @@ static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
clk_enable(clk_np->clk, clk_np->parent);
}
if (clk_np->ops->enable)
if (clk->enable_count == 0 && clk_np->ops->enable)
{
err = clk_np->ops->enable(clk);
}
if (!err)
{
++clk->enable_count;
}
return err;
}
@@ -359,10 +375,6 @@ rt_err_t rt_clk_enable(struct rt_clk *clk)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -374,10 +386,14 @@ static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
clk_disable(clk_np->clk, clk_np->parent);
}
if (clk_np->ops->disable)
if (clk->enable_count == 1 && clk_np->ops->disable)
{
clk_np->ops->disable(clk);
}
if (clk->enable_count)
{
--clk->enable_count;
}
}
void rt_clk_disable(struct rt_clk *clk)
@@ -394,7 +410,7 @@ void rt_clk_disable(struct rt_clk *clk)
rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
{
rt_err_t err;
rt_err_t err = RT_EOK;
RT_DEBUG_NOT_IN_INTERRUPT;
@@ -412,10 +428,6 @@ rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -453,10 +465,6 @@ rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -478,10 +486,6 @@ rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -508,10 +512,6 @@ rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -529,29 +529,16 @@ void rt_clk_array_disable(struct rt_clk_array *clk_arr)
rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
{
rt_err_t err = RT_EOK;
rt_err_t err;
if (clk_arr)
if ((err = rt_clk_array_prepare(clk_arr)))
{
for (int i = 0; i < clk_arr->count; ++i)
{
if ((err = rt_clk_prepare_enable(clk_arr->clks[i])))
{
LOG_E("CLK Array[%d] %s failed error = %s", i,
"prepare_enable", rt_strerror(err));
while (i --> 0)
{
rt_clk_disable_unprepare(clk_arr->clks[i]);
}
break;
}
}
return err;
}
else
if ((err = rt_clk_array_enable(clk_arr)))
{
err = -RT_EINVAL;
rt_clk_array_unprepare(clk_arr);
}
return err;
@@ -559,13 +546,8 @@ rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
{
if (clk_arr)
{
for (int i = 0; i < clk_arr->count; ++i)
{
rt_clk_disable_unprepare(clk_arr->clks[i]);
}
}
rt_clk_array_disable(clk_arr);
rt_clk_array_unprepare(clk_arr);
}
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
@@ -604,10 +586,6 @@ rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t ma
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -622,10 +600,6 @@ rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -640,10 +614,6 @@ rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -652,7 +622,9 @@ rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
{
rt_err_t err = RT_EOK;
if (clk && clk->clk_np)
rate = rt_clk_round_rate(clk, rate);
if (clk && clk->clk_np && rate > 0)
{
struct rt_clk_node *clk_np = clk->clk_np;
@@ -690,17 +662,13 @@ rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
{
rt_ubase_t rate = -1UL;
rt_ubase_t rate = 0;
if (clk)
{
@@ -729,10 +697,6 @@ rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -749,38 +713,49 @@ rt_base_t rt_clk_get_phase(struct rt_clk *clk)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
res = -RT_EINVAL;
}
return res;
}
rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
{
rt_base_t res = RT_EOK;
rt_base_t res = -RT_EINVAL;
if (clk && clk->clk_np && clk->clk_np->ops->round_rate)
if (clk && clk->clk_np)
{
rt_ubase_t best_parent_rate;
struct rt_clk_node *clk_np = clk->clk_np;
rt_hw_spin_lock(&_clk_lock.lock);
if (clk_np->min_rate && clk_np->max_rate)
if (clk_np->ops->round_rate)
{
rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
rt_ubase_t best_parent_rate;
rt_hw_spin_lock(&_clk_lock.lock);
if (clk_np->min_rate && clk_np->max_rate)
{
rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
}
res = clk_np->ops->round_rate(clk, rate, &best_parent_rate);
(void)best_parent_rate;
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
if (rate < clk_np->min_rate)
{
res = clk_np->min_rate;
}
else if (rate > clk_np->max_rate)
{
res = clk_np->max_rate;
}
else
{
res = rate;
}
}
res = clk->clk_np->ops->round_rate(clk, rate, &best_parent_rate);
(void)best_parent_rate;
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
res = -RT_EINVAL;
}
return res;
@@ -798,10 +773,6 @@ rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
rt_hw_spin_unlock(&_clk_lock.lock);
}
else
{
err = -RT_EINVAL;
}
return err;
}
@@ -887,7 +858,7 @@ void rt_clk_put(struct rt_clk *clk)
}
#ifdef RT_USING_OFW
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name)
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked)
{
struct rt_clk *clk = RT_NULL;
struct rt_ofw_cell_args clk_args;
@@ -895,10 +866,32 @@ static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, con
if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
{
int count;
struct rt_object *obj;
struct rt_clk_node *clk_np = RT_NULL;
struct rt_ofw_node *clk_ofw_np = clk_args.data;
struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);
count = rt_ofw_count_of_clk(clk_ofw_np);
if (!rt_ofw_data(clk_ofw_np))
{
if (locked)
{
rt_hw_spin_unlock(&_clk_lock.lock);
}
rt_platform_ofw_request(clk_ofw_np);
if (locked)
{
rt_hw_spin_lock(&_clk_lock.lock);
}
}
if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np,
RT_CLK_NODE_OBJ_NAME, "#clock-cells")))
{
clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent);
count = rt_ofw_count_of_clk(clk_ofw_np);
}
rt_ofw_node_put(clk_ofw_np);
@@ -912,6 +905,10 @@ static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, con
clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
}
else
{
clk = rt_err_ptr(-RT_ERROR);
}
}
return clk;
@@ -923,7 +920,7 @@ static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char
rt_hw_spin_lock(&_clk_lock.lock);
clk = ofw_get_clk_no_lock(np, index, name);
clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE);
rt_hw_spin_unlock(&_clk_lock.lock);
@@ -935,6 +932,11 @@ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
int count;
struct rt_clk_array *clk_arr = RT_NULL;
if (!np)
{
return rt_err_ptr(-RT_EINVAL);
}
if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
{
clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);
@@ -942,6 +944,7 @@ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
if (clk_arr)
{
int i;
rt_err_t err = RT_EOK;
rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");
clk_arr->count = count;
@@ -957,10 +960,12 @@ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
}
clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name);
clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE);
if (!clk_arr->clks[i])
if (rt_is_err(clk_arr->clks[i]))
{
err = rt_ptr_err(clk_arr->clks[i]);
--i;
break;
}
@@ -971,7 +976,7 @@ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
if (i > 0 && i < count)
{
rt_clk_array_put(clk_arr);
clk_arr = RT_NULL;
clk_arr = rt_err_ptr(err);
}
}
}

View File

@@ -360,9 +360,9 @@ rt_err_t rt_bus_remove_device(rt_device_t dev)
}
else if (drv)
{
if (drv->shutdown)
if (drv->remove)
{
err = drv->shutdown(dev);
err = drv->remove(dev);
}
/* device and driver are in the same bus */

View File

@@ -13,6 +13,8 @@
* 2013-07-09 Grissiom add ref_count support
* 2016-04-02 Bernard fix the open_flag initialization issue.
* 2021-03-19 Meco Man remove rt_device_init_all()
* 2024-09-15 milo fix log format issue
* fix reopen with a different oflag issue
*/
#include <rtthread.h>
@@ -29,9 +31,9 @@
#include <rtdevice.h> /* for wqueue_init */
#endif /* RT_USING_POSIX_DEVIO */
#ifdef RT_USING_DFS_V2
#if defined (RT_USING_DFS_V2) && defined (RT_USING_DFS_DEVFS)
#include <devfs.h>
#endif /* RT_USING_DFS_V2 */
#endif /* RT_USING_DFS_V2 RT_USING_DFS_DEVFS */
#ifdef RT_USING_DEVICE
@@ -82,7 +84,7 @@ rt_err_t rt_device_register(rt_device_t dev,
rt_wqueue_init(&(dev->wait_queue));
#endif /* RT_USING_POSIX_DEVIO */
#ifdef RT_USING_DFS_V2
#if defined (RT_USING_DFS_V2) && defined (RT_USING_DFS_DEVFS)
dfs_devfs_device_add(dev);
#endif /* RT_USING_DFS_V2 */
@@ -163,7 +165,7 @@ void rt_device_destroy(rt_device_t dev)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Null);
RT_ASSERT(rt_object_is_systemobject(&dev->parent) == RT_FALSE);
rt_object_detach(&(dev->parent));
@@ -195,8 +197,8 @@ rt_err_t rt_device_init(rt_device_t dev)
result = device_init(dev);
if (result != RT_EOK)
{
LOG_E("To initialize device:%s failed. The error code is %d",
dev->parent.name, result);
LOG_E("To initialize device:%.*s failed. The error code is %d",
RT_NAME_MAX, dev->parent.name, result);
}
else
{
@@ -233,8 +235,8 @@ rt_err_t rt_device_open(rt_device_t dev, rt_uint16_t oflag)
result = device_init(dev);
if (result != RT_EOK)
{
LOG_E("To initialize device:%s failed. The error code is %d",
dev->parent.name, result);
LOG_E("To initialize device:%.*s failed. The error code is %d",
RT_NAME_MAX, dev->parent.name, result);
return result;
}
@@ -252,7 +254,7 @@ rt_err_t rt_device_open(rt_device_t dev, rt_uint16_t oflag)
/* device is not opened or opened by other oflag, call device_open interface */
if (!(dev->open_flag & RT_DEVICE_OFLAG_OPEN) ||
((dev->open_flag & RT_DEVICE_OFLAG_MASK) != (oflag & RT_DEVICE_OFLAG_MASK)))
((dev->open_flag & RT_DEVICE_OFLAG_MASK) != ((oflag & RT_DEVICE_OFLAG_MASK) | RT_DEVICE_OFLAG_OPEN)))
{
if (device_open != RT_NULL)
{

View File

@@ -53,6 +53,137 @@ void rt_dm_secondary_cpu_init(void)
}
#endif /* RT_USING_SMP */
/**
* @brief This function will alloc an id in an IDA object
*
* @param ida is the IDA object
*
* @return the id or -RT_EEMPTY
*/
int rt_dm_ida_alloc(struct rt_dm_ida *ida)
{
int id;
RT_ASSERT(ida != RT_NULL);
rt_spin_lock(&ida->lock);
id = rt_bitmap_next_clear_bit(ida->map, 0, RT_DM_IDA_NUM);
if (id != RT_DM_IDA_NUM)
{
rt_bitmap_set_bit(ida->map, id);
}
rt_spin_unlock(&ida->lock);
if (id != RT_DM_IDA_NUM)
{
return id;
}
return -RT_EEMPTY;
}
/**
* @brief This function will take (force) an id in an IDA object
*
* @param ida is the IDA object
*
* @param id is the id to take
*
* @return RT_TRUE if the id was taken, RT_FALSE if it is already in use
*/
rt_bool_t rt_dm_ida_take(struct rt_dm_ida *ida, int id)
{
RT_ASSERT(ida != RT_NULL);
RT_ASSERT(id >= 0);
rt_spin_lock(&ida->lock);
if (!rt_bitmap_test_bit(ida->map, id))
{
rt_bitmap_set_bit(ida->map, id);
}
else
{
id = RT_DM_IDA_NUM;
}
rt_spin_unlock(&ida->lock);
return id != RT_DM_IDA_NUM;
}
/**
* @brief This function will release an id in an IDA object
*
* @param ida is the IDA object
*
* @param id is the id of IDA object
*/
void rt_dm_ida_free(struct rt_dm_ida *ida, int id)
{
RT_ASSERT(ida != RT_NULL);
RT_ASSERT(id >= 0);
rt_spin_lock(&ida->lock);
rt_bitmap_clear_bit(ida->map, id);
rt_spin_unlock(&ida->lock);
}
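/*
 * Minimal usage sketch for the IDA helpers above (illustrative only; how
 * the "ida" object itself is defined and initialized is left to the DM
 * headers):
 *
 *     int id = rt_dm_ida_alloc(&ida);
 *
 *     if (id >= 0)
 *     {
 *         ... use id as a bus or device number ...
 *         rt_dm_ida_free(&ida, id);
 *     }
 *
 * rt_dm_ida_take() reserves a specific, caller-chosen id instead and
 * returns RT_FALSE when that id is already in use.
 */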
/**
* @brief This function will return the device with the specified master id and device id.
*
* @param master_id is the master id (0, 255] of device
*
* @param device_id is the device id [-1, 255] of device; when device_id is -1,
* the search returns the first device that matches master_id.
*
* @return the device object or RT_NULL
*/
rt_device_t rt_dm_device_find(int master_id, int device_id)
{
struct rt_device *dev, *ret_dev = RT_NULL;
struct rt_object_information *information = RT_NULL;
if (master_id <= 0 || device_id > 255)
{
return RT_NULL;
}
information = rt_object_get_information(RT_Object_Class_Device);
/* parameter check */
if (!information)
{
return RT_NULL;
}
/* this function must not be invoked in interrupt context */
RT_DEBUG_NOT_IN_INTERRUPT;
/* enter critical */
rt_enter_critical();
/* try to find object */
rt_list_for_each_entry(dev, &information->object_list, parent.list)
{
if (master_id == dev->master_id &&
(device_id == -1 || device_id == dev->device_id))
{
ret_dev = dev;
break;
}
}
/* leave critical */
rt_exit_critical();
return ret_dev;
}
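/*
 * Usage sketch (illustrative): find the first registered device under
 * master id 1, regardless of its device id:
 *
 *     rt_device_t dev = rt_dm_device_find(1, -1);
 *
 *     if (dev)
 *     {
 *         rt_kprintf("found %s\n", dev->parent.name);
 *     }
 */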
struct prefix_track
{
rt_list_t list;

View File

@@ -78,21 +78,21 @@ static rt_bool_t platform_match(rt_driver_t drv, rt_device_t dev)
{
struct rt_platform_driver *pdrv = rt_container_of(drv, struct rt_platform_driver, parent);
struct rt_platform_device *pdev = rt_container_of(dev, struct rt_platform_device, parent);
#ifdef RT_USING_OFW
struct rt_ofw_node *np = dev->ofw_node;
/* 1. match with ofw node */
if (np)
{
#ifdef RT_USING_OFW
pdev->id = rt_ofw_node_match(np, pdrv->ids);
#else
pdev->id = RT_NULL;
#endif
if (pdev->id)
{
return RT_TRUE;
}
}
#endif
/* 2. match with name */
if (pdev->name && pdrv->name)
@@ -123,7 +123,13 @@ static rt_err_t platform_probe(rt_device_t dev)
if (err && err != -RT_EEMPTY)
{
LOG_E("Attach power domain error = %s in device %s", pdev->name, rt_strerror(err));
LOG_E("Attach power domain error = %s in device %s", rt_strerror(err),
#ifdef RT_USING_OFW
(pdev->name && pdev->name[0]) ? pdev->name : rt_ofw_node_full_name(np)
#else
pdev->name
#endif
);
return err;
}

View File

@@ -17,6 +17,7 @@
#include <drivers/ofw_io.h>
#include <drivers/ofw_fdt.h>
#include <drivers/platform.h>
#include <drivers/core/bus.h>
#include <drivers/core/dm.h>
#include "../ofw/ofw_internal.h"
@@ -161,6 +162,7 @@ static rt_err_t platform_ofw_device_probe_once(struct rt_ofw_node *parent_np)
}
pdev->dev_id = ofw_alias_node_id(np);
np->dev = &pdev->parent;
LOG_D("%s register to bus", np->full_name);
rt_platform_device_register(pdev);
@@ -199,6 +201,53 @@ rt_err_t rt_platform_ofw_device_probe_child(struct rt_ofw_node *np)
return err;
}
rt_err_t rt_platform_ofw_request(struct rt_ofw_node *np)
{
rt_err_t err;
if (np)
{
struct rt_device *dev = np->dev;
if (dev)
{
/* Device was already created */
if (dev->drv)
{
/* Probe was already OK */
err = RT_EOK;
}
else
{
err = rt_bus_reload_driver_device(dev->bus, dev);
}
}
else
{
struct rt_platform_device *pdev = alloc_ofw_platform_device(np);
if (pdev)
{
pdev->dev_id = ofw_alias_node_id(np);
np->dev = &pdev->parent;
LOG_D("%s register to bus", np->full_name);
err = rt_platform_device_register(pdev);
}
else
{
err = -RT_ENOMEM;
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
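/*
 * Frameworks such as clk and dma use this helper to force an on-demand
 * probe of a provider node that is referenced by phandle but has not been
 * bound to a driver yet, for example:
 *
 *     if (!rt_ofw_data(provider_np))
 *     {
 *         rt_platform_ofw_request(provider_np);
 *     }
 */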
static int platform_ofw_device_probe(void)
{
rt_err_t err = RT_EOK;
@@ -206,6 +255,8 @@ static int platform_ofw_device_probe(void)
if (ofw_node_root)
{
rt_ofw_node_get(ofw_node_root);
err = platform_ofw_device_probe_once(ofw_node_root);
rt_ofw_node_put(ofw_node_root);
@@ -216,11 +267,19 @@ static int platform_ofw_device_probe(void)
rt_ofw_node_put(node);
}
if ((node = rt_ofw_find_node_by_path("/clocks")))
{
platform_ofw_device_probe_once(node);
rt_ofw_node_put(node);
}
rt_ofw_node_get(ofw_node_chosen);
if ((node = rt_ofw_get_child_by_compatible(ofw_node_chosen, "simple-framebuffer")))
{
platform_ofw_device_probe_once(node);
rt_ofw_node_put(node);
}
rt_ofw_node_get(ofw_node_chosen);
}
else
{
@@ -244,7 +303,7 @@ rt_err_t rt_platform_ofw_free(struct rt_platform_device *pdev)
rt_ofw_node_clear_flag(np, RT_OFW_F_PLATFORM);
rt_ofw_node_put(np);
pdev->parent.ofw_node = RT_NULL;
rt_free(pdev);
}
}
else

View File

@@ -0,0 +1,10 @@
menuconfig RT_USING_DMA
bool "Using Direct Memory Access (DMA)"
depends on RT_USING_DM
select RT_USING_ADT
select RT_USING_ADT_BITMAP
default n
if RT_USING_DMA
osource "$(SOC_DM_DMA_DIR)/Kconfig"
endif

View File

@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_USING_DMA']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../include']
src = ['dma.c', 'dma_pool.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,589 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "rtdm.dma"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static rt_list_t dmac_nodes = RT_LIST_OBJECT_INIT(dmac_nodes);
static struct rt_spinlock dmac_nodes_lock = {};
rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl)
{
const char *dev_name;
char dma_name[RT_NAME_MAX];
if (!ctrl || !ctrl->dev || !ctrl->ops)
{
return -RT_EINVAL;
}
dev_name = rt_dm_dev_get_name(ctrl->dev);
if (rt_bitmap_next_set_bit(ctrl->dir_cap, 0, RT_DMA_DIR_MAX) == RT_DMA_DIR_MAX)
{
LOG_E("%s: Not direction capability", dev_name);
return -RT_EINVAL;
}
rt_snprintf(dma_name, sizeof(dma_name), "%s-dmac", dev_name);
rt_list_init(&ctrl->list);
rt_spin_lock(&dmac_nodes_lock);
rt_list_insert_before(&dmac_nodes, &ctrl->list);
rt_spin_unlock(&dmac_nodes_lock);
rt_list_init(&ctrl->channels_nodes);
rt_mutex_init(&ctrl->mutex, dma_name, RT_IPC_FLAG_PRIO);
if (ctrl->dev->ofw_node)
{
rt_dm_dev_bind_fwdata(ctrl->dev, RT_NULL, ctrl);
}
return RT_EOK;
}
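/*
 * A controller driver fills the ops table and the direction capability
 * bitmap before registering. A sketch (the "my_dmac_ops" table and the
 * probe context are assumptions):
 *
 *     ctrl->dev = &pdev->parent;
 *     ctrl->ops = &my_dmac_ops;
 *     rt_bitmap_set_bit(ctrl->dir_cap, RT_DMA_MEM_TO_MEM);
 *     rt_bitmap_set_bit(ctrl->dir_cap, RT_DMA_MEM_TO_DEV);
 *     rt_bitmap_set_bit(ctrl->dir_cap, RT_DMA_DEV_TO_MEM);
 *
 *     rt_dma_controller_register(ctrl);
 */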
rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
{
if (!ctrl)
{
return -RT_EINVAL;
}
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
if (!rt_list_isempty(&ctrl->channels_nodes))
{
rt_mutex_release(&ctrl->mutex);
return -RT_EBUSY;
}
if (ctrl->dev->ofw_node)
{
rt_dm_dev_unbind_fwdata(ctrl->dev, RT_NULL);
}
rt_mutex_release(&ctrl->mutex);
rt_mutex_detach(&ctrl->mutex);
rt_spin_lock(&dmac_nodes_lock);
rt_list_remove(&ctrl->list);
rt_spin_unlock(&dmac_nodes_lock);
return RT_EOK;
}
rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan)
{
rt_err_t err;
struct rt_dma_controller *ctrl;
if (!chan)
{
return -RT_EINVAL;
}
if (chan->prep_err)
{
LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
return chan->prep_err;
}
ctrl = chan->ctrl;
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->start(chan);
rt_mutex_release(&ctrl->mutex);
return err;
}
rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan)
{
rt_err_t err;
struct rt_dma_controller *ctrl;
if (!chan)
{
return -RT_EINVAL;
}
if (chan->prep_err)
{
LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));
return chan->prep_err;
}
ctrl = chan->ctrl;
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->stop(chan);
rt_mutex_release(&ctrl->mutex);
return err;
}
rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
struct rt_dma_slave_config *conf)
{
rt_err_t err;
struct rt_dma_controller *ctrl;
enum rt_dma_transfer_direction dir;
if (!chan || !conf)
{
err = -RT_EINVAL;
goto _end;
}
dir = conf->direction;
if (dir >= RT_DMA_DIR_MAX)
{
err = -RT_EINVAL;
goto _end;
}
if (conf->src_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX ||
conf->dst_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX)
{
err = -RT_EINVAL;
goto _end;
}
ctrl = chan->ctrl;
if (!rt_bitmap_test_bit(ctrl->dir_cap, dir))
{
err = -RT_ENOSYS;
goto _end;
}
if (!chan->name && dir != RT_DMA_MEM_TO_MEM)
{
LOG_E("%s: illegal config for uname channels",
rt_dm_dev_get_name(ctrl->dev));
err = -RT_EINVAL;
goto _end;
}
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->config(chan, conf);
rt_mutex_release(&ctrl->mutex);
if (!err)
{
rt_memcpy(&chan->conf, conf, sizeof(*conf));
}
_end:
chan->conf_err = err;
return err;
}
rt_err_t rt_dma_chan_done(struct rt_dma_chan *chan, rt_size_t size)
{
if (!chan)
{
return -RT_EINVAL;
}
if (chan->callback)
{
chan->callback(chan, size);
}
return RT_EOK;
}
static rt_bool_t range_is_illegal(const char *name, const char *desc,
rt_ubase_t addr0, rt_ubase_t addr1)
{
rt_bool_t illegal = addr0 < addr1;
if (illegal)
{
LOG_E("%s: %s %p is out of config %p", name, desc, addr0, addr1);
}
return illegal;
}
rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer)
{
rt_err_t err;
rt_size_t len;
rt_ubase_t dma_addr_src, dma_addr_dst;
struct rt_dma_controller *ctrl;
struct rt_dma_slave_config *conf;
if (!chan || !transfer)
{
return -RT_EINVAL;
}
ctrl = chan->ctrl;
conf = &chan->conf;
if (chan->conf_err)
{
LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
return chan->conf_err;
}
RT_ASSERT(chan->conf.direction == RT_DMA_MEM_TO_MEM);
dma_addr_src = transfer->src_addr;
dma_addr_dst = transfer->dst_addr;
len = transfer->buffer_len;
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
dma_addr_src, conf->src_addr))
{
return -RT_EINVAL;
}
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
dma_addr_dst, conf->dst_addr))
{
return -RT_EINVAL;
}
if (ctrl->ops->prep_memcpy)
{
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->prep_memcpy(chan, dma_addr_dst, dma_addr_src, len);
rt_mutex_release(&ctrl->mutex);
}
else
{
err = -RT_ENOSYS;
}
if (!err)
{
rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
}
chan->prep_err = err;
return err;
}
rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer)
{
rt_err_t err;
rt_ubase_t dma_buf_addr;
struct rt_dma_controller *ctrl;
struct rt_dma_slave_config *conf;
enum rt_dma_transfer_direction dir;
if (!chan || !transfer)
{
return -RT_EINVAL;
}
ctrl = chan->ctrl;
conf = &chan->conf;
if (chan->conf_err)
{
LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
return chan->conf_err;
}
dir = chan->conf.direction;
if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
{
dma_buf_addr = transfer->src_addr;
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
dma_buf_addr, conf->src_addr))
{
return -RT_EINVAL;
}
}
else if (dir == RT_DMA_DEV_TO_MEM)
{
dma_buf_addr = transfer->dst_addr;
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
dma_buf_addr, conf->dst_addr))
{
return -RT_EINVAL;
}
}
else
{
dma_buf_addr = ~0UL;
}
if (ctrl->ops->prep_cyclic)
{
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->prep_cyclic(chan, dma_buf_addr,
transfer->buffer_len, transfer->period_len, dir);
rt_mutex_release(&ctrl->mutex);
}
else
{
err = -RT_ENOSYS;
}
if (!err)
{
rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
}
chan->prep_err = err;
return err;
}
rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer)
{
rt_err_t err;
rt_ubase_t dma_buf_addr;
struct rt_dma_controller *ctrl;
struct rt_dma_slave_config *conf;
enum rt_dma_transfer_direction dir;
if (!chan || !transfer)
{
return -RT_EINVAL;
}
ctrl = chan->ctrl;
conf = &chan->conf;
if (chan->conf_err)
{
LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
return chan->conf_err;
}
dir = chan->conf.direction;
if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
{
dma_buf_addr = transfer->src_addr;
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
dma_buf_addr, conf->src_addr))
{
return -RT_EINVAL;
}
}
else if (dir == RT_DMA_DEV_TO_MEM)
{
dma_buf_addr = transfer->dst_addr;
if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
dma_buf_addr, conf->dst_addr))
{
return -RT_EINVAL;
}
}
else
{
dma_buf_addr = ~0UL;
}
if (ctrl->ops->prep_single)
{
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
err = ctrl->ops->prep_single(chan, dma_buf_addr,
transfer->buffer_len, dir);
rt_mutex_release(&ctrl->mutex);
}
else
{
err = -RT_ENOSYS;
}
if (!err)
{
rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
}
chan->prep_err = err;
return err;
}
static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
const char *name, struct rt_ofw_cell_args *args)
{
struct rt_dma_controller *ctrl = RT_NULL;
#ifdef RT_USING_OFW
int index;
struct rt_ofw_node *np = dev->ofw_node, *ctrl_np;
if (!np)
{
return RT_NULL;
}
index = rt_ofw_prop_index_of_string(np, "dma-names", name);
if (index < 0)
{
return RT_NULL;
}
if (!rt_ofw_parse_phandle_cells(np, "dmas", "#dma-cells", index, args))
{
ctrl_np = args->data;
if (!rt_ofw_data(ctrl_np))
{
rt_platform_ofw_request(ctrl_np);
}
ctrl = rt_ofw_data(ctrl_np);
rt_ofw_node_put(ctrl_np);
}
#endif /* RT_USING_OFW */
return ctrl;
}
struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
{
void *fw_data = RT_NULL;
struct rt_dma_chan *chan;
struct rt_ofw_cell_args dma_args;
struct rt_dma_controller *ctrl = RT_NULL;
if (!dev)
{
return rt_err_ptr(-RT_EINVAL);
}
if (name)
{
fw_data = &dma_args;
ctrl = ofw_find_dma_controller(dev, name, &dma_args);
}
else
{
struct rt_dma_controller *ctrl_tmp;
rt_spin_lock(&dmac_nodes_lock);
rt_list_for_each_entry(ctrl_tmp, &dmac_nodes, list)
{
/* Only memory-to-memory for unnamed requests */
if (rt_bitmap_test_bit(ctrl_tmp->dir_cap, RT_DMA_MEM_TO_MEM))
{
ctrl = ctrl_tmp;
break;
}
}
rt_spin_unlock(&dmac_nodes_lock);
}
if (rt_is_err_or_null(ctrl))
{
return ctrl ? ctrl : rt_err_ptr(-RT_ENOSYS);
}
if (ctrl->ops->request_chan)
{
chan = ctrl->ops->request_chan(ctrl, dev, fw_data);
}
else
{
chan = rt_calloc(1, sizeof(*chan));
if (!chan)
{
chan = rt_err_ptr(-RT_ENOMEM);
}
}
if (rt_is_err(chan))
{
return chan;
}
if (!chan)
{
LOG_E("%s: unset request channels error", rt_dm_dev_get_name(ctrl->dev));
return rt_err_ptr(-RT_ERROR);
}
chan->name = name;
chan->ctrl = ctrl;
chan->slave = dev;
rt_list_init(&chan->list);
chan->conf_err = -RT_ERROR;
chan->prep_err = -RT_ERROR;
rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
rt_list_insert_before(&ctrl->channels_nodes, &chan->list);
rt_mutex_release(&ctrl->mutex);
return chan;
}
rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan)
{
rt_err_t err = RT_EOK;
if (!chan)
{
return -RT_EINVAL;
}
rt_mutex_take(&chan->ctrl->mutex, RT_WAITING_FOREVER);
rt_list_remove(&chan->list);
rt_mutex_release(&chan->ctrl->mutex);
if (chan->ctrl->ops->release_chan)
{
err = chan->ctrl->ops->release_chan(chan);
}
else
{
rt_free(chan);
}
return err;
}
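/*
 * Typical slave-channel flow from a peripheral driver's point of view
 * (a sketch; the "uart0_dev" device and the transfer details are
 * assumptions):
 *
 *     struct rt_dma_chan *chan = rt_dma_chan_request(uart0_dev, "tx");
 *
 *     if (!rt_is_err_or_null(chan))
 *     {
 *         struct rt_dma_slave_config conf = { 0 };
 *         struct rt_dma_slave_transfer xfer = { 0 };
 *
 *         conf.direction = RT_DMA_MEM_TO_DEV;
 *         ... fill the addresses and bus widths of conf and xfer ...
 *
 *         if (!rt_dma_chan_config(chan, &conf) &&
 *             !rt_dma_prep_single(chan, &xfer))
 *         {
 *             rt_dma_chan_start(chan);
 *             ... the controller signals completion via rt_dma_chan_done() ...
 *             rt_dma_chan_stop(chan);
 *         }
 *
 *         rt_dma_chan_release(chan);
 *     }
 */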

View File

@@ -0,0 +1,691 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "dma.pool"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <mm_aspace.h>
#include <dt-bindings/size.h>
static struct rt_spinlock dma_pools_lock = {};
static rt_list_t dma_pool_nodes = RT_LIST_OBJECT_INIT(dma_pool_nodes);
static struct rt_dma_pool *dma_pool_install(rt_region_t *region);
static void *dma_alloc(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags);
static void dma_free(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags);
rt_inline void region_pool_lock(void)
{
rt_hw_spin_lock(&dma_pools_lock.lock);
}
rt_inline void region_pool_unlock(void)
{
rt_hw_spin_unlock(&dma_pools_lock.lock);
}
static rt_err_t dma_map_coherent_sync_out_data(struct rt_device *dev,
void *data, rt_size_t size, rt_ubase_t *dma_handle, rt_ubase_t flags)
{
if (dma_handle)
{
*dma_handle = (rt_ubase_t)rt_kmem_v2p(data);
}
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data, size);
return RT_EOK;
}
static rt_err_t dma_map_coherent_sync_in_data(struct rt_device *dev,
void *out_data, rt_size_t size, rt_ubase_t dma_handle, rt_ubase_t flags)
{
rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, out_data, size);
return RT_EOK;
}
static const struct rt_dma_map_ops dma_map_coherent_ops =
{
.sync_out_data = dma_map_coherent_sync_out_data,
.sync_in_data = dma_map_coherent_sync_in_data,
};
static rt_err_t dma_map_nocoherent_sync_out_data(struct rt_device *dev,
void *data, rt_size_t size, rt_ubase_t *dma_handle, rt_ubase_t flags)
{
if (dma_handle)
{
*dma_handle = (rt_ubase_t)rt_kmem_v2p(data);
}
return RT_EOK;
}
static rt_err_t dma_map_nocoherent_sync_in_data(struct rt_device *dev,
void *out_data, rt_size_t size, rt_ubase_t dma_handle, rt_ubase_t flags)
{
return RT_EOK;
}
static const struct rt_dma_map_ops dma_map_nocoherent_ops =
{
.sync_out_data = dma_map_nocoherent_sync_out_data,
.sync_in_data = dma_map_nocoherent_sync_in_data,
};
#ifdef RT_USING_OFW
rt_inline rt_ubase_t ofw_addr_cpu2dma(struct rt_device *dev, rt_ubase_t addr)
{
return (rt_ubase_t)rt_ofw_translate_cpu2dma(dev->ofw_node, addr);
}
rt_inline rt_ubase_t ofw_addr_dma2cpu(struct rt_device *dev, rt_ubase_t addr)
{
return (rt_ubase_t)rt_ofw_translate_dma2cpu(dev->ofw_node, addr);
}
static void *ofw_dma_map_alloc(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags)
{
void *cpu_addr = dma_alloc(dev, size, dma_handle, flags);
if (cpu_addr && dma_handle)
{
*dma_handle = ofw_addr_cpu2dma(dev, *dma_handle);
}
return cpu_addr;
}
static void ofw_dma_map_free(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags)
{
dma_handle = ofw_addr_dma2cpu(dev, dma_handle);
dma_free(dev, size, cpu_addr, dma_handle, flags);
}
static rt_err_t ofw_dma_map_sync_out_data(struct rt_device *dev,
void *data, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags)
{
rt_err_t err;
if (flags & RT_DMA_F_NOCACHE)
{
err = dma_map_nocoherent_sync_out_data(dev, data, size, dma_handle, flags);
}
else
{
err = dma_map_coherent_sync_out_data(dev, data, size, dma_handle, flags);
}
if (!err && dma_handle)
{
*dma_handle = ofw_addr_cpu2dma(dev, *dma_handle);
}
return err;
}
static rt_err_t ofw_dma_map_sync_in_data(struct rt_device *dev,
void *out_data, rt_size_t size,
rt_ubase_t dma_handle, rt_ubase_t flags)
{
dma_handle = ofw_addr_dma2cpu(dev, dma_handle);
if (flags & RT_DMA_F_NOCACHE)
{
return dma_map_nocoherent_sync_in_data(dev, out_data, size, dma_handle, flags);
}
return dma_map_coherent_sync_in_data(dev, out_data, size, dma_handle, flags);
}
static const struct rt_dma_map_ops ofw_dma_map_ops =
{
.alloc = ofw_dma_map_alloc,
.free = ofw_dma_map_free,
.sync_out_data = ofw_dma_map_sync_out_data,
.sync_in_data = ofw_dma_map_sync_in_data,
};
static const struct rt_dma_map_ops *ofw_device_dma_ops(struct rt_device *dev)
{
rt_err_t err;
int region_nr = 0;
const fdt32_t *cell;
rt_phandle phandle;
rt_region_t region;
struct rt_ofw_prop *prop;
struct rt_dma_pool *dma_pool;
const struct rt_dma_map_ops *ops = RT_NULL;
struct rt_ofw_node *mem_np, *np = dev->ofw_node;
rt_ofw_foreach_prop_u32(np, "memory-region", prop, cell, phandle)
{
rt_uint64_t addr, size;
if (!(mem_np = rt_ofw_find_node_by_phandle(phandle)))
{
if (region_nr == 0)
{
return RT_NULL;
}
break;
}
if ((err = rt_ofw_get_address(mem_np, 0, &addr, &size)))
{
LOG_E("%s: Read '%s' error = %s", rt_ofw_node_full_name(mem_np),
"memory-region", rt_strerror(err));
rt_ofw_node_put(mem_np);
continue;
}
region.start = addr;
region.end = addr + size;
region.name = rt_dm_dev_get_name(dev);
rt_ofw_node_put(mem_np);
if (!(dma_pool = dma_pool_install(&region)))
{
return RT_NULL;
}
if (rt_ofw_prop_read_bool(mem_np, "no-map"))
{
dma_pool->flags |= RT_DMA_F_NOMAP;
}
if (!rt_dma_device_is_coherent(dev))
{
dma_pool->flags |= RT_DMA_F_NOCACHE;
}
dma_pool->dev = dev;
++region_nr;
}
if (region_nr)
{
ops = &ofw_dma_map_ops;
}
return ops;
}
#endif /* RT_USING_OFW */
static const struct rt_dma_map_ops *device_dma_ops(struct rt_device *dev)
{
const struct rt_dma_map_ops *ops = dev->dma_ops;
if (ops)
{
return ops;
}
#ifdef RT_USING_OFW
if (dev->ofw_node && (ops = ofw_device_dma_ops(dev)))
{
return ops;
}
#endif
if (rt_dma_device_is_coherent(dev))
{
ops = &dma_map_coherent_ops;
}
else
{
ops = &dma_map_nocoherent_ops;
}
dev->dma_ops = ops;
return ops;
}
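/*
 * First-fit allocator over the pool bitmap: scan for a run of clear bits
 * long enough for `size` (rounded up to whole pages), mark the run as
 * used and return the physical address of its first page, or 0 when no
 * run fits.
 */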
static rt_ubase_t dma_pool_alloc(struct rt_dma_pool *pool, rt_size_t size)
{
rt_size_t bit, next_bit, end_bit, max_bits;
size = RT_DIV_ROUND_UP(size, ARCH_PAGE_SIZE);
max_bits = pool->bits - size;
rt_bitmap_for_each_clear_bit(pool->map, bit, max_bits)
{
end_bit = bit + size;
for (next_bit = bit + 1; next_bit < end_bit; ++next_bit)
{
if (rt_bitmap_test_bit(pool->map, next_bit))
{
bit = next_bit;
goto _next;
}
}
if (next_bit == end_bit)
{
while (next_bit --> bit)
{
rt_bitmap_set_bit(pool->map, next_bit);
}
return pool->start + bit * ARCH_PAGE_SIZE;
}
_next:;
}
return RT_NULL;
}
static void dma_pool_free(struct rt_dma_pool *pool, rt_ubase_t offset, rt_size_t size)
{
rt_size_t bit = (offset - pool->start) / ARCH_PAGE_SIZE, end_bit;
size = RT_DIV_ROUND_UP(size, ARCH_PAGE_SIZE);
end_bit = bit + size;
for (; bit < end_bit; ++bit)
{
rt_bitmap_clear_bit(pool->map, bit);
}
}
static void *dma_alloc(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags)
{
void *dma_buffer = RT_NULL;
struct rt_dma_pool *pool;
region_pool_lock();
rt_list_for_each_entry(pool, &dma_pool_nodes, list)
{
if (pool->flags & RT_DMA_F_DEVICE)
{
if (!(flags & RT_DMA_F_DEVICE) || pool->dev != dev)
{
continue;
}
}
else if ((flags & RT_DMA_F_DEVICE))
{
continue;
}
if ((flags & RT_DMA_F_NOMAP) && !((pool->flags & RT_DMA_F_NOMAP)))
{
continue;
}
if ((flags & RT_DMA_F_32BITS) && !((pool->flags & RT_DMA_F_32BITS)))
{
continue;
}
if ((flags & RT_DMA_F_LINEAR) && !((pool->flags & RT_DMA_F_LINEAR)))
{
continue;
}
*dma_handle = dma_pool_alloc(pool, size);
if (*dma_handle && !(flags & RT_DMA_F_NOMAP))
{
if (flags & RT_DMA_F_NOCACHE)
{
dma_buffer = rt_ioremap_nocache((void *)*dma_handle, size);
}
else
{
dma_buffer = rt_ioremap_cached((void *)*dma_handle, size);
}
if (!dma_buffer)
{
dma_pool_free(pool, *dma_handle, size);
continue;
}
break;
}
else if (*dma_handle)
{
dma_buffer = (void *)*dma_handle;
break;
}
}
region_pool_unlock();
return dma_buffer;
}
static void dma_free(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags)
{
struct rt_dma_pool *pool;
region_pool_lock();
rt_list_for_each_entry(pool, &dma_pool_nodes, list)
{
if (dma_handle >= pool->region.start &&
dma_handle <= pool->region.end)
{
rt_iounmap(cpu_addr);
dma_pool_free(pool, dma_handle, size);
break;
}
}
region_pool_unlock();
}
void *rt_dma_alloc(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags)
{
void *dma_buffer = RT_NULL;
rt_ubase_t dma_handle_s = 0;
const struct rt_dma_map_ops *ops;
if (!dev || !size)
{
return RT_NULL;
}
ops = device_dma_ops(dev);
if (ops->alloc)
{
dma_buffer = ops->alloc(dev, size, &dma_handle_s, flags);
}
else
{
dma_buffer = dma_alloc(dev, size, &dma_handle_s, flags);
}
if (!dma_buffer)
{
return dma_buffer;
}
if (dma_handle)
{
*dma_handle = dma_handle_s;
}
return dma_buffer;
}
void rt_dma_free(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags)
{
const struct rt_dma_map_ops *ops;
if (!dev || !size || !cpu_addr)
{
return;
}
ops = device_dma_ops(dev);
if (ops->free)
{
ops->free(dev, size, cpu_addr, dma_handle, flags);
}
else
{
dma_free(dev, size, cpu_addr, dma_handle, flags);
}
}
rt_err_t rt_dma_sync_out_data(struct rt_device *dev, void *data, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags)
{
rt_err_t err;
rt_ubase_t dma_handle_s = 0;
const struct rt_dma_map_ops *ops;
if (!data || !size)
{
return -RT_EINVAL;
}
ops = device_dma_ops(dev);
err = ops->sync_out_data(dev, data, size, &dma_handle_s, flags);
if (dma_handle)
{
*dma_handle = dma_handle_s;
}
return err;
}
rt_err_t rt_dma_sync_in_data(struct rt_device *dev, void *out_data, rt_size_t size,
rt_ubase_t dma_handle, rt_ubase_t flags)
{
rt_err_t err;
const struct rt_dma_map_ops *ops;
if (!out_data || !size)
{
return -RT_EINVAL;
}
ops = device_dma_ops(dev);
err = ops->sync_in_data(dev, out_data, size, dma_handle, flags);
return err;
}
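/*
 * Usage sketch for the helpers above (illustrative; the device pointer
 * "dev" and the 4 KiB size are assumptions). For a long-lived DMA buffer:
 *
 *     rt_ubase_t dma_handle;
 *     void *buf = rt_dma_alloc(dev, 4096, &dma_handle, RT_DMA_F_32BITS);
 *     ...
 *     rt_dma_free(dev, 4096, buf, dma_handle, RT_DMA_F_32BITS);
 *
 * For an existing kernel buffer, rt_dma_sync_out_data() makes CPU writes
 * visible to the device (and reports the bus address), while
 * rt_dma_sync_in_data() makes device writes visible to the CPU.
 */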
static struct rt_dma_pool *dma_pool_install(rt_region_t *region)
{
rt_err_t err;
struct rt_dma_pool *pool;
if (!(pool = rt_calloc(1, sizeof(*pool))))
{
LOG_E("Install pool[%p, %p] error = %s",
region->start, region->end, rt_strerror(-RT_ENOMEM));
return RT_NULL;
}
rt_memcpy(&pool->region, region, sizeof(*region));
pool->flags |= RT_DMA_F_LINEAR;
if (region->end < 4UL * SIZE_GB)
{
pool->flags |= RT_DMA_F_32BITS;
}
pool->start = RT_ALIGN(pool->region.start, ARCH_PAGE_SIZE);
pool->bits = (pool->region.end - pool->start) / ARCH_PAGE_SIZE;
if (!pool->bits)
{
err = -RT_EINVAL;
goto _fail;
}
pool->map = rt_calloc(RT_BITMAP_LEN(pool->bits), sizeof(*pool->map));
if (!pool->map)
{
err = -RT_ENOMEM;
goto _fail;
}
rt_list_init(&pool->list);
region_pool_lock();
rt_list_insert_before(&dma_pool_nodes, &pool->list);
region_pool_unlock();
return pool;
_fail:
rt_free(pool);
LOG_E("Install pool[%p, %p] error = %s",
region->start, region->end, rt_strerror(err));
return RT_NULL;
}
struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region)
{
struct rt_dma_pool *pool;
if (!region)
{
return RT_NULL;
}
if ((pool = dma_pool_install(region)))
{
region = &pool->region;
LOG_I("%s: Reserved %u.%u MiB at %p",
region->name,
(region->end - region->start) / SIZE_MB,
(region->end - region->start) / SIZE_KB & (SIZE_KB - 1),
region->start);
}
return pool;
}
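/*
 * Board code may also reserve a DMA region explicitly. A sketch (the
 * address range is an assumption):
 *
 *     rt_region_t region =
 *     {
 *         .name  = "dma-pool",
 *         .start = 0x80000000,
 *         .end   = 0x80000000 + 16 * SIZE_MB,
 *     };
 *
 *     rt_dma_pool_install(&region);
 */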
rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
rt_size_t cma_size, rt_size_t coherent_pool_size)
{
struct rt_dma_pool *pool;
rt_region_t *region = region_list, *region_high = RT_NULL, cma, coherent_pool;
if (!region_list || !list_len || cma_size < coherent_pool_size)
{
return -RT_EINVAL;
}
for (rt_size_t i = 0; i < list_len; ++i, ++region)
{
if (!region->name)
{
continue;
}
/* Always use low address in 4G */
if (region->end - region->start >= cma_size)
{
if ((rt_ssize_t)((4UL * SIZE_GB) - region->start) < cma_size)
{
region_high = region;
continue;
}
goto _found;
}
}
if (region_high)
{
region = region_high;
LOG_W("No available DMA zone in 4G");
goto _found;
}
return -RT_EEMPTY;
_found:
if (region->end - region->start != cma_size)
{
cma.start = region->start;
cma.end = cma.start + cma_size;
/* Update input region */
region->start += cma_size;
}
else
{
rt_memcpy(&cma, region, sizeof(cma));
}
coherent_pool.name = "coherent-pool";
coherent_pool.start = cma.start;
coherent_pool.end = coherent_pool.start + coherent_pool_size;
cma.name = "cma";
cma.start += coherent_pool_size;
if (!(pool = rt_dma_pool_install(&coherent_pool)))
{
return -RT_ENOMEM;
}
/* Use: CMA > coherent-pool */
if (!(pool = rt_dma_pool_install(&cma)))
{
return -RT_ENOMEM;
}
return RT_EOK;
}
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
static int list_dma_pool(int argc, char**argv)
{
int count = 0;
rt_region_t *region;
struct rt_dma_pool *pool;
rt_kprintf("%-*.s Region\n", RT_NAME_MAX, "Name");
region_pool_lock();
rt_list_for_each_entry(pool, &dma_pool_nodes, list)
{
region = &pool->region;
rt_kprintf("%-*.s [%p, %p]\n", RT_NAME_MAX, region->name,
region->start, region->end);
++count;
}
rt_kprintf("%d DMA memory found\n", count);
region_pool_unlock();
return 0;
}
MSH_CMD_EXPORT(list_dma_pool, dump all dma memory pool);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -320,7 +320,14 @@ static rt_err_t rt_hwtimer_control(struct rt_device *dev, int cmd, void *args)
break;
default:
{
result = -RT_ENOSYS;
if (timer->ops->control != RT_NULL)
{
result = timer->ops->control(timer, cmd, args);
}
else
{
result = -RT_ENOSYS;
}
}
break;
}

View File

@@ -17,203 +17,227 @@ if RT_USING_I2C
default n
endif
config RT_USING_SOFT_I2C
menuconfig RT_USING_SOFT_I2C
bool "Use GPIO to soft simulate I2C"
default n
select RT_USING_PIN
select RT_USING_I2C_BITOPS
if RT_USING_SOFT_I2C
config RT_USING_SOFT_I2C1
bool "Enable I2C1 Bus (software simulation)"
default y
if RT_USING_SOFT_I2C1
config RT_SOFT_I2C1_SCL_PIN
int "SCL pin number"
range 0 32767
default 1
config RT_SOFT_I2C1_SDA_PIN
int "SDA pin number"
range 0 32767
default 2
config RT_SOFT_I2C1_BUS_NAME
string "Bus name"
default "i2c1"
config RT_SOFT_I2C1_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C1_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C2
bool "Enable I2C2 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C2
config RT_SOFT_I2C2_SCL_PIN
int "SCL pin number"
range 0 32767
default 3
config RT_SOFT_I2C2_SDA_PIN
int "SDA pin number"
range 0 32767
default 4
config RT_SOFT_I2C2_BUS_NAME
string "Bus name"
default "i2c2"
config RT_SOFT_I2C2_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C2_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C3
bool "Enable I2C3 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C3
config RT_SOFT_I2C3_SCL_PIN
int "SCL pin number"
range 0 32767
default 5
config RT_SOFT_I2C3_SDA_PIN
int "SDA pin number"
range 0 32767
default 6
config RT_SOFT_I2C3_BUS_NAME
string "Bus name"
default "i2c3"
config RT_SOFT_I2C3_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C3_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C4
bool "Enable I2C4 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C4
config RT_SOFT_I2C4_SCL_PIN
int "SCL pin number"
range 0 32767
default 7
config RT_SOFT_I2C4_SDA_PIN
int "SDA pin number"
range 0 32767
default 8
config RT_SOFT_I2C4_BUS_NAME
string "Bus name"
default "i2c4"
config RT_SOFT_I2C4_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C4_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C5
bool "Enable I2C5 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C5
config RT_SOFT_I2C5_SCL_PIN
int "SCL pin number"
range 0 32767
default 9
config RT_SOFT_I2C5_SDA_PIN
int "SDA pin number"
range 0 32767
default 10
config RT_SOFT_I2C5_BUS_NAME
string "Bus name"
default "i2c5"
config RT_SOFT_I2C5_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C5_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C6
bool "Enable I2C6 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C6
config RT_SOFT_I2C6_SCL_PIN
int "SCL pin number"
range 0 32767
default 11
config RT_SOFT_I2C6_SDA_PIN
int "SDA pin number"
range 0 32767
default 12
config RT_SOFT_I2C6_BUS_NAME
string "Bus name"
default "i2c6"
config RT_SOFT_I2C6_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C6_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C7
bool "Enable I2C7 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C7
config RT_SOFT_I2C7_SCL_PIN
int "SCL pin number"
range 0 32767
default 13
config RT_SOFT_I2C7_SDA_PIN
int "SDA pin number"
range 0 32767
default 14
config RT_SOFT_I2C7_BUS_NAME
string "Bus name"
default "i2c7"
config RT_SOFT_I2C7_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C7_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
config RT_USING_SOFT_I2C8
bool "Enable I2C8 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C8
config RT_SOFT_I2C8_SCL_PIN
int "SCL pin number"
range 0 32767
default 15
config RT_SOFT_I2C8_SDA_PIN
int "SDA pin number"
range 0 32767
default 16
config RT_SOFT_I2C8_BUS_NAME
string "Bus name"
default "i2c8"
config RT_SOFT_I2C8_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C8_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
endif
if RT_USING_SOFT_I2C
menuconfig RT_USING_SOFT_I2C0
bool "Enable I2C0 Bus (software simulation)"
default y
if RT_USING_SOFT_I2C0
config RT_SOFT_I2C0_SCL_PIN
int "SCL pin number"
range 0 32767
default 1
config RT_SOFT_I2C0_SDA_PIN
int "SDA pin number"
range 0 32767
default 2
config RT_SOFT_I2C0_BUS_NAME
string "Bus name"
default "i2c0"
config RT_SOFT_I2C0_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C0_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C1
bool "Enable I2C1 Bus (software simulation)"
default y
if RT_USING_SOFT_I2C1
config RT_SOFT_I2C1_SCL_PIN
int "SCL pin number"
range 0 32767
default 3
config RT_SOFT_I2C1_SDA_PIN
int "SDA pin number"
range 0 32767
default 4
config RT_SOFT_I2C1_BUS_NAME
string "Bus name"
default "i2c1"
config RT_SOFT_I2C1_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C1_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C2
bool "Enable I2C2 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C2
config RT_SOFT_I2C2_SCL_PIN
int "SCL pin number"
range 0 32767
default 5
config RT_SOFT_I2C2_SDA_PIN
int "SDA pin number"
range 0 32767
default 6
config RT_SOFT_I2C2_BUS_NAME
string "Bus name"
default "i2c2"
config RT_SOFT_I2C2_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C2_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C3
bool "Enable I2C3 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C3
config RT_SOFT_I2C3_SCL_PIN
int "SCL pin number"
range 0 32767
default 7
config RT_SOFT_I2C3_SDA_PIN
int "SDA pin number"
range 0 32767
default 8
config RT_SOFT_I2C3_BUS_NAME
string "Bus name"
default "i2c3"
config RT_SOFT_I2C3_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C3_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C4
bool "Enable I2C4 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C4
config RT_SOFT_I2C4_SCL_PIN
int "SCL pin number"
range 0 32767
default 9
config RT_SOFT_I2C4_SDA_PIN
int "SDA pin number"
range 0 32767
default 10
config RT_SOFT_I2C4_BUS_NAME
string "Bus name"
default "i2c4"
config RT_SOFT_I2C4_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C4_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C5
bool "Enable I2C5 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C5
config RT_SOFT_I2C5_SCL_PIN
int "SCL pin number"
range 0 32767
default 11
config RT_SOFT_I2C5_SDA_PIN
int "SDA pin number"
range 0 32767
default 12
config RT_SOFT_I2C5_BUS_NAME
string "Bus name"
default "i2c5"
config RT_SOFT_I2C5_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C5_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C6
bool "Enable I2C6 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C6
config RT_SOFT_I2C6_SCL_PIN
int "SCL pin number"
range 0 32767
default 13
config RT_SOFT_I2C6_SDA_PIN
int "SDA pin number"
range 0 32767
default 14
config RT_SOFT_I2C6_BUS_NAME
string "Bus name"
default "i2c6"
config RT_SOFT_I2C6_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C6_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C7
bool "Enable I2C7 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C7
config RT_SOFT_I2C7_SCL_PIN
int "SCL pin number"
range 0 32767
default 15
config RT_SOFT_I2C7_SDA_PIN
int "SDA pin number"
range 0 32767
default 16
config RT_SOFT_I2C7_BUS_NAME
string "Bus name"
default "i2c7"
config RT_SOFT_I2C7_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C7_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
menuconfig RT_USING_SOFT_I2C8
bool "Enable I2C8 Bus (software simulation)"
default n
if RT_USING_SOFT_I2C8
config RT_SOFT_I2C8_SCL_PIN
int "SCL pin number"
range 0 32767
default 17
config RT_SOFT_I2C8_SDA_PIN
int "SDA pin number"
range 0 32767
default 18
config RT_SOFT_I2C8_BUS_NAME
string "Bus name"
default "i2c8"
config RT_SOFT_I2C8_TIMING_DELAY
int "Timing delay (us)"
range 0 32767
default 10
config RT_SOFT_I2C8_TIMING_TIMEOUT
int "Timing timeout (tick)"
range 0 32767
default 10
endif
endif
endif

View File

@@ -3,16 +3,16 @@ from building import *
cwd = GetCurrentDir()
src = Split("""
i2c_core.c
i2c_dev.c
dev_i2c_core.c
dev_i2c_dev.c
""")
if GetDepend('RT_USING_I2C_BITOPS'):
src = src + ['i2c-bit-ops.c']
src = src + ['dev_i2c_bit_ops.c']
if GetDepend('RT_USING_SOFT_I2C'):
src = src + ['soft_i2c.c']
src = src + ['dev_soft_i2c.c']
if GetDepend(['RT_USING_DM']):
src += ['i2c_bus.c', 'i2c_dm.c']
src += ['dev_i2c_bus.c', 'dev_i2c_dm.c']
# The set of source files associated with this SConscript file.
path = [cwd + '/../include']

View File

@@ -0,0 +1,464 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-04-25 weety first version
*/
#include <rtdevice.h>
#define DBG_TAG "I2C"
#ifdef RT_I2C_BITOPS_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif
#include <rtdbg.h>
#define SET_SDA(ops, val) ops->set_sda(ops->data, val)
#define SET_SCL(ops, val) ops->set_scl(ops->data, val)
#define GET_SDA(ops) ops->get_sda(ops->data)
#define GET_SCL(ops) ops->get_scl(ops->data)
rt_inline void i2c_delay(struct rt_i2c_bit_ops *ops)
{
ops->udelay((ops->delay_us + 1) >> 1);
}
rt_inline void i2c_delay2(struct rt_i2c_bit_ops *ops)
{
ops->udelay(ops->delay_us);
}
#define SDA_L(ops) SET_SDA(ops, 0)
#define SDA_H(ops) SET_SDA(ops, 1)
#define SCL_L(ops) SET_SCL(ops, 0)
/**
* release scl line, and wait scl line to high.
*/
static rt_err_t SCL_H(struct rt_i2c_bit_ops *ops)
{
rt_tick_t start;
SET_SCL(ops, 1);
if (!ops->get_scl)
goto done;
start = rt_tick_get();
while (!GET_SCL(ops))
{
if ((rt_tick_get() - start) > ops->timeout)
return -RT_ETIMEOUT;
i2c_delay(ops);
}
#ifdef RT_I2C_BITOPS_DEBUG
if (rt_tick_get() != start)
{
LOG_D("wait %ld tick for SCL line to go high",
rt_tick_get() - start);
}
#endif
done:
i2c_delay(ops);
return RT_EOK;
}
static void i2c_start(struct rt_i2c_bit_ops *ops)
{
#ifdef RT_I2C_BITOPS_DEBUG
if (ops->get_scl && !GET_SCL(ops))
{
LOG_E("I2C bus error, SCL line low");
}
if (ops->get_sda && !GET_SDA(ops))
{
LOG_E("I2C bus error, SDA line low");
}
#endif
SDA_L(ops);
i2c_delay(ops);
SCL_L(ops);
}
static void i2c_restart(struct rt_i2c_bit_ops *ops)
{
SDA_H(ops);
SCL_H(ops);
i2c_delay(ops);
SDA_L(ops);
i2c_delay(ops);
SCL_L(ops);
}
static void i2c_stop(struct rt_i2c_bit_ops *ops)
{
SDA_L(ops);
i2c_delay(ops);
SCL_H(ops);
i2c_delay(ops);
SDA_H(ops);
i2c_delay2(ops);
}
rt_inline rt_bool_t i2c_waitack(struct rt_i2c_bit_ops *ops)
{
rt_bool_t ack;
SDA_H(ops);
i2c_delay(ops);
if (SCL_H(ops) < 0)
{
LOG_W("wait ack timeout");
return -RT_ETIMEOUT;
}
ack = !GET_SDA(ops); /* ACK : SDA pin is pulled low */
LOG_D("%s", ack ? "ACK" : "NACK");
SCL_L(ops);
return ack;
}
static rt_int32_t i2c_writeb(struct rt_i2c_bus_device *bus, rt_uint8_t data)
{
rt_int32_t i;
rt_uint8_t bit;
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
for (i = 7; i >= 0; i--)
{
SCL_L(ops);
bit = (data >> i) & 1;
SET_SDA(ops, bit);
i2c_delay(ops);
if (SCL_H(ops) < 0)
{
LOG_D("i2c_writeb: 0x%02x, "
"wait scl pin high timeout at bit %d",
data, i);
return -RT_ETIMEOUT;
}
}
SCL_L(ops);
i2c_delay(ops);
return i2c_waitack(ops);
}
static rt_int32_t i2c_readb(struct rt_i2c_bus_device *bus)
{
rt_uint8_t i;
rt_uint8_t data = 0;
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
SDA_H(ops);
i2c_delay(ops);
for (i = 0; i < 8; i++)
{
data <<= 1;
if (SCL_H(ops) < 0)
{
LOG_D("i2c_readb: wait scl pin high "
"timeout at bit %d", 7 - i);
return -RT_ETIMEOUT;
}
if (GET_SDA(ops))
data |= 1;
SCL_L(ops);
i2c_delay2(ops);
}
return data;
}
static rt_ssize_t i2c_send_bytes(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg *msg)
{
rt_int32_t ret;
rt_size_t bytes = 0;
const rt_uint8_t *ptr = msg->buf;
rt_int32_t count = msg->len;
rt_uint16_t ignore_nack = msg->flags & RT_I2C_IGNORE_NACK;
while (count > 0)
{
ret = i2c_writeb(bus, *ptr);
if ((ret > 0) || (ignore_nack && (ret == 0)))
{
count --;
ptr ++;
bytes ++;
}
else if (ret == 0)
{
LOG_D("send bytes: NACK.");
return 0;
}
else
{
LOG_E("send bytes: error %d", ret);
return ret;
}
}
return bytes;
}
static rt_err_t i2c_send_ack_or_nack(struct rt_i2c_bus_device *bus, int ack)
{
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
if (ack)
SET_SDA(ops, 0);
i2c_delay(ops);
if (SCL_H(ops) < 0)
{
LOG_E("ACK or NACK timeout.");
return -RT_ETIMEOUT;
}
SCL_L(ops);
return RT_EOK;
}
static rt_ssize_t i2c_recv_bytes(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg *msg)
{
rt_int32_t val;
rt_int32_t bytes = 0; /* actual bytes */
rt_uint8_t *ptr = msg->buf;
rt_int32_t count = msg->len;
const rt_uint32_t flags = msg->flags;
while (count > 0)
{
val = i2c_readb(bus);
if (val >= 0)
{
*ptr = val;
bytes ++;
}
else
{
break;
}
ptr ++;
count --;
LOG_D("recieve bytes: 0x%02x, %s",
val, (flags & RT_I2C_NO_READ_ACK) ?
"(No ACK/NACK)" : (count ? "ACK" : "NACK"));
if (!(flags & RT_I2C_NO_READ_ACK))
{
val = i2c_send_ack_or_nack(bus, count);
if (val < 0)
return val;
}
}
return bytes;
}
static rt_int32_t i2c_send_address(struct rt_i2c_bus_device *bus,
rt_uint8_t addr,
rt_int32_t retries)
{
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
rt_int32_t i;
rt_err_t ret = 0;
for (i = 0; i <= retries; i++)
{
ret = i2c_writeb(bus, addr);
if (ret == 1 || i == retries)
break;
LOG_D("send stop condition");
i2c_stop(ops);
i2c_delay2(ops);
LOG_D("send start condition");
i2c_start(ops);
}
return ret;
}
static rt_err_t i2c_bit_send_address(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg *msg)
{
rt_uint16_t flags = msg->flags;
rt_uint16_t ignore_nack = msg->flags & RT_I2C_IGNORE_NACK;
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
rt_uint8_t addr1, addr2;
rt_int32_t retries;
rt_err_t ret;
retries = ignore_nack ? 0 : bus->retries;
if (flags & RT_I2C_ADDR_10BIT)
{
addr1 = 0xf0 | ((msg->addr >> 7) & 0x06);
addr2 = msg->addr & 0xff;
LOG_D("addr1: %d, addr2: %d", addr1, addr2);
ret = i2c_send_address(bus, addr1, retries);
if ((ret != 1) && !ignore_nack)
{
LOG_W("NACK: sending first addr");
return -RT_EIO;
}
ret = i2c_writeb(bus, addr2);
if ((ret != 1) && !ignore_nack)
{
LOG_W("NACK: sending second addr");
return -RT_EIO;
}
if (flags & RT_I2C_RD)
{
LOG_D("send repeated start condition");
i2c_restart(ops);
addr1 |= 0x01;
ret = i2c_send_address(bus, addr1, retries);
if ((ret != 1) && !ignore_nack)
{
LOG_E("NACK: sending repeated addr");
return -RT_EIO;
}
}
}
else
{
/* 7-bit addr */
addr1 = msg->addr << 1;
if (flags & RT_I2C_RD)
addr1 |= 1;
ret = i2c_send_address(bus, addr1, retries);
if ((ret != 1) && !ignore_nack)
return -RT_EIO;
}
return RT_EOK;
}
static rt_ssize_t i2c_bit_xfer(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg msgs[],
rt_uint32_t num)
{
struct rt_i2c_msg *msg;
struct rt_i2c_bit_ops *ops = (struct rt_i2c_bit_ops *)bus->priv;
rt_int32_t ret;
rt_uint32_t i;
rt_uint16_t ignore_nack;
if((ops->i2c_pin_init_flag == RT_FALSE) && (ops->pin_init != RT_NULL))
{
ops->pin_init();
ops->i2c_pin_init_flag = RT_TRUE;
}
if (num == 0) return 0;
for (i = 0; i < num; i++)
{
msg = &msgs[i];
ignore_nack = msg->flags & RT_I2C_IGNORE_NACK;
if (!(msg->flags & RT_I2C_NO_START))
{
if (i)
{
i2c_restart(ops);
}
else
{
LOG_D("send start condition");
i2c_start(ops);
}
ret = i2c_bit_send_address(bus, msg);
if ((ret != RT_EOK) && !ignore_nack)
{
LOG_D("receive NACK from device addr 0x%02x msg %d",
msgs[i].addr, i);
goto out;
}
}
if (msg->flags & RT_I2C_RD)
{
ret = i2c_recv_bytes(bus, msg);
if (ret >= 1)
{
LOG_D("read %d byte%s", ret, ret == 1 ? "" : "s");
}
if (ret < msg->len)
{
if (ret >= 0)
ret = -RT_EIO;
goto out;
}
}
else
{
ret = i2c_send_bytes(bus, msg);
if (ret >= 1)
{
LOG_D("write %d byte%s", ret, ret == 1 ? "" : "s");
}
if (ret < msg->len)
{
if (ret >= 0)
ret = -RT_ERROR;
goto out;
}
}
}
ret = i;
out:
if (!(msg->flags & RT_I2C_NO_STOP))
{
LOG_D("send stop condition");
i2c_stop(ops);
}
return ret;
}
static const struct rt_i2c_bus_device_ops i2c_bit_bus_ops =
{
i2c_bit_xfer,
RT_NULL,
RT_NULL
};
rt_err_t rt_i2c_bit_add_bus(struct rt_i2c_bus_device *bus,
const char *bus_name)
{
bus->ops = &i2c_bit_bus_ops;
return rt_i2c_bus_device_register(bus, bus_name);
}
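For reference, a minimal sketch of how a board support package might wire GPIO callbacks into rt_i2c_bit_ops and register a bit-banged bus with rt_i2c_bit_add_bus(); the pin numbers, bus name and timing values below are illustrative only.
#include <rtthread.h>
#include <rtdevice.h>

#define BB_I2C_SCL_PIN 20 /* illustrative pins */
#define BB_I2C_SDA_PIN 21

static void bb_set_sda(void *data, rt_int32_t state) { rt_pin_write(BB_I2C_SDA_PIN, state); }
static void bb_set_scl(void *data, rt_int32_t state) { rt_pin_write(BB_I2C_SCL_PIN, state); }
static rt_int32_t bb_get_sda(void *data) { return rt_pin_read(BB_I2C_SDA_PIN); }
static rt_int32_t bb_get_scl(void *data) { return rt_pin_read(BB_I2C_SCL_PIN); }

static struct rt_i2c_bit_ops bb_ops =
{
    .set_sda  = bb_set_sda,
    .set_scl  = bb_set_scl,
    .get_sda  = bb_get_sda,
    .get_scl  = bb_get_scl,
    .udelay   = rt_hw_us_delay,
    .delay_us = 5,   /* roughly a 100 kHz half period */
    .timeout  = 100, /* clock-stretch timeout, in ticks */
};
static struct rt_i2c_bus_device bb_bus;

static int bb_i2c_register(void)
{
    rt_pin_mode(BB_I2C_SCL_PIN, PIN_MODE_OUTPUT_OD);
    rt_pin_mode(BB_I2C_SDA_PIN, PIN_MODE_OUTPUT_OD);

    bb_bus.priv = &bb_ops; /* i2c_bit_xfer() fetches the ops through priv */

    return rt_i2c_bit_add_bus(&bb_bus, "i2c_bb");
}
INIT_DEVICE_EXPORT(bb_i2c_register);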

View File

@@ -0,0 +1,182 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-06 GuEe-GUI first version
*/
#include <rtdevice.h>
#define DBG_TAG "dev.i2c.bus"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static struct rt_bus i2c_bus;
void i2c_bus_scan_clients(struct rt_i2c_bus_device *bus)
{
#ifdef RT_USING_OFW
if (bus->parent.ofw_node)
{
struct rt_ofw_node *np = bus->parent.ofw_node, *child_np, *i2c_client_np;
rt_ofw_foreach_available_child_node(np, child_np)
{
rt_uint32_t client_addr;
struct rt_i2c_client *client;
if (rt_ofw_prop_read_bool(child_np, "compatible"))
{
i2c_client_np = child_np;
}
else
{
/* Maybe in i2c-mux */
i2c_client_np = rt_ofw_get_next_child(child_np, RT_NULL);
if (!rt_ofw_prop_read_bool(i2c_client_np, "compatible"))
{
continue;
}
}
client = rt_calloc(1, sizeof(*client));
if (!client)
{
rt_ofw_node_put(i2c_client_np);
LOG_E("Not memory to create i2c client: %s",
rt_ofw_node_full_name(i2c_client_np));
return;
}
rt_ofw_prop_read_u32(i2c_client_np, "reg", &client_addr);
client->parent.ofw_node = i2c_client_np;
client->name = rt_ofw_node_name(i2c_client_np);
client->bus = bus;
client->client_addr = client_addr;
rt_i2c_device_register(client);
if (i2c_client_np != child_np)
{
rt_ofw_node_put(i2c_client_np);
}
}
}
#endif /* RT_USING_OFW */
}
rt_err_t rt_i2c_driver_register(struct rt_i2c_driver *driver)
{
RT_ASSERT(driver != RT_NULL);
driver->parent.bus = &i2c_bus;
return rt_driver_register(&driver->parent);
}
rt_err_t rt_i2c_device_register(struct rt_i2c_client *client)
{
RT_ASSERT(client != RT_NULL);
return rt_bus_add_device(&i2c_bus, &client->parent);
}
static rt_bool_t i2c_match(rt_driver_t drv, rt_device_t dev)
{
const struct rt_i2c_device_id *id;
struct rt_i2c_driver *driver = rt_container_of(drv, struct rt_i2c_driver, parent);
struct rt_i2c_client *client = rt_container_of(dev, struct rt_i2c_client, parent);
if ((id = driver->ids))
{
for (; id->name[0]; ++id)
{
if (!rt_strcmp(id->name, client->name))
{
client->id = id;
client->ofw_id = RT_NULL;
return RT_TRUE;
}
}
}
#ifdef RT_USING_OFW
client->ofw_id = rt_ofw_node_match(client->parent.ofw_node, driver->ofw_ids);
if (client->ofw_id)
{
client->id = RT_NULL;
return RT_TRUE;
}
#endif
return RT_FALSE;
}
static rt_err_t i2c_probe(rt_device_t dev)
{
rt_err_t err;
struct rt_i2c_driver *driver = rt_container_of(dev->drv, struct rt_i2c_driver, parent);
struct rt_i2c_client *client = rt_container_of(dev, struct rt_i2c_client, parent);
if (!client->bus)
{
return -RT_EINVAL;
}
err = driver->probe(client);
return err;
}
static rt_err_t i2c_remove(rt_device_t dev)
{
struct rt_i2c_driver *driver = rt_container_of(dev->drv, struct rt_i2c_driver, parent);
struct rt_i2c_client *client = rt_container_of(dev, struct rt_i2c_client, parent);
if (driver && driver->remove)
{
driver->remove(client);
}
return RT_EOK;
}
static rt_err_t i2c_shutdown(rt_device_t dev)
{
struct rt_i2c_driver *driver = rt_container_of(dev->drv, struct rt_i2c_driver, parent);
struct rt_i2c_client *client = rt_container_of(dev, struct rt_i2c_client, parent);
if (driver && driver->shutdown)
{
driver->shutdown(client);
}
return RT_EOK;
}
static struct rt_bus i2c_bus =
{
.name = "i2c",
.match = i2c_match,
.probe = i2c_probe,
.remove = i2c_remove,
.shutdown = i2c_shutdown,
};
static int i2c_bus_init(void)
{
rt_bus_register(&i2c_bus);
return 0;
}
INIT_CORE_EXPORT(i2c_bus_init);
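A hedged sketch of a client driver attaching to this bus model; the id table layout (a fixed-size name plus private data) and the "eeprom" node name are assumptions for illustration.
static rt_err_t eeprom_probe(struct rt_i2c_client *client)
{
    /* client->bus and client->client_addr were filled in by i2c_bus_scan_clients() */
    rt_kprintf("i2c client %s at 0x%02x probed\n", client->name, client->client_addr);
    return RT_EOK;
}

static const struct rt_i2c_device_id eeprom_ids[] =
{
    { .name = "eeprom" },
    { /* sentinel */ },
};

static struct rt_i2c_driver eeprom_driver =
{
    .ids   = eeprom_ids,
    .probe = eeprom_probe,
};

static int eeprom_driver_init(void)
{
    return rt_i2c_driver_register(&eeprom_driver);
}
INIT_DEVICE_EXPORT(eeprom_driver_init);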

View File

@@ -0,0 +1,153 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-04-25 weety first version
* 2021-04-20 RiceChen added support for bus control api
*/
#include <rtdevice.h>
#define DBG_TAG "I2C"
#ifdef RT_I2C_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif
#include <rtdbg.h>
rt_err_t rt_i2c_bus_device_register(struct rt_i2c_bus_device *bus,
const char *bus_name)
{
rt_err_t res = RT_EOK;
rt_mutex_init(&bus->lock, "i2c_bus_lock", RT_IPC_FLAG_PRIO);
if (bus->timeout == 0) bus->timeout = RT_TICK_PER_SECOND;
res = rt_i2c_bus_device_device_init(bus, bus_name);
LOG_D("I2C bus [%s] registered", bus_name);
#ifdef RT_USING_DM
if (!res)
{
i2c_bus_scan_clients(bus);
}
#endif
return res;
}
struct rt_i2c_bus_device *rt_i2c_bus_device_find(const char *bus_name)
{
struct rt_i2c_bus_device *bus;
rt_device_t dev = rt_device_find(bus_name);
if (dev == RT_NULL || dev->type != RT_Device_Class_I2CBUS)
{
LOG_E("I2C bus %s not exist", bus_name);
return RT_NULL;
}
bus = (struct rt_i2c_bus_device *)dev->user_data;
return bus;
}
rt_ssize_t rt_i2c_transfer(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg msgs[],
rt_uint32_t num)
{
rt_ssize_t ret;
rt_err_t err;
if (bus->ops->master_xfer)
{
#ifdef RT_I2C_DEBUG
for (ret = 0; ret < num; ret++)
{
LOG_D("msgs[%d] %c, addr=0x%02x, len=%d", ret,
(msgs[ret].flags & RT_I2C_RD) ? 'R' : 'W',
msgs[ret].addr, msgs[ret].len);
}
#endif
err = rt_mutex_take(&bus->lock, RT_WAITING_FOREVER);
if (err != RT_EOK)
{
return (rt_ssize_t)err;
}
ret = bus->ops->master_xfer(bus, msgs, num);
err = rt_mutex_release(&bus->lock);
if (err != RT_EOK)
{
return (rt_ssize_t)err;
}
return ret;
}
else
{
LOG_E("I2C bus operation not supported");
return -RT_EINVAL;
}
}
rt_err_t rt_i2c_control(struct rt_i2c_bus_device *bus,
int cmd,
void *args)
{
rt_err_t ret;
if(bus->ops->i2c_bus_control)
{
ret = bus->ops->i2c_bus_control(bus, cmd, args);
return ret;
}
else
{
LOG_E("I2C bus operation not supported");
return -RT_EINVAL;
}
}
rt_ssize_t rt_i2c_master_send(struct rt_i2c_bus_device *bus,
rt_uint16_t addr,
rt_uint16_t flags,
const rt_uint8_t *buf,
rt_uint32_t count)
{
rt_ssize_t ret;
struct rt_i2c_msg msg;
msg.addr = addr;
msg.flags = flags;
msg.len = count;
msg.buf = (rt_uint8_t *)buf;
ret = rt_i2c_transfer(bus, &msg, 1);
return (ret == 1) ? count : ret;
}
rt_ssize_t rt_i2c_master_recv(struct rt_i2c_bus_device *bus,
rt_uint16_t addr,
rt_uint16_t flags,
rt_uint8_t *buf,
rt_uint32_t count)
{
rt_ssize_t ret;
struct rt_i2c_msg msg;
RT_ASSERT(bus != RT_NULL);
msg.addr = addr;
msg.flags = flags | RT_I2C_RD;
msg.len = count;
msg.buf = buf;
ret = rt_i2c_transfer(bus, &msg, 1);
return (ret == 1) ? count : ret;
}
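As a usage note, reading a register through this API usually pairs a write of the register index with a repeated-start read; a sketch, with the 7-bit address 0x50 and bus name "i2c1" chosen only for illustration:
static rt_err_t demo_read_reg(struct rt_i2c_bus_device *bus, rt_uint8_t reg, rt_uint8_t *val)
{
    struct rt_i2c_msg msgs[2];

    msgs[0].addr  = 0x50;       /* slave address (7-bit) */
    msgs[0].flags = RT_I2C_WR;  /* write the register index */
    msgs[0].buf   = &reg;
    msgs[0].len   = 1;

    msgs[1].addr  = 0x50;
    msgs[1].flags = RT_I2C_RD;  /* repeated start, then read one byte */
    msgs[1].buf   = val;
    msgs[1].len   = 1;

    /* rt_i2c_transfer() returns the number of messages completed */
    return (rt_i2c_transfer(bus, msgs, 2) == 2) ? RT_EOK : -RT_EIO;
}

/* e.g.: demo_read_reg(rt_i2c_bus_device_find("i2c1"), 0x00, &value); */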

View File

@@ -0,0 +1,137 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-04-25 weety first version
* 2014-08-03 bernard fix some compiling warning
* 2021-04-20 RiceChen added support for bus clock control
*/
#include <rtdevice.h>
#define DBG_TAG "I2C"
#ifdef RT_I2C_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif
#include <rtdbg.h>
static rt_ssize_t i2c_bus_device_read(rt_device_t dev,
rt_off_t pos,
void *buffer,
rt_size_t count)
{
rt_uint16_t addr;
rt_uint16_t flags;
struct rt_i2c_bus_device *bus = (struct rt_i2c_bus_device *)dev->user_data;
RT_ASSERT(bus != RT_NULL);
RT_ASSERT(buffer != RT_NULL);
LOG_D("I2C bus dev [%s] reading %u bytes.", dev->parent.name, count);
addr = pos & 0xffff;
flags = (pos >> 16) & 0xffff;
return rt_i2c_master_recv(bus, addr, flags, (rt_uint8_t *)buffer, count);
}
static rt_ssize_t i2c_bus_device_write(rt_device_t dev,
rt_off_t pos,
const void *buffer,
rt_size_t count)
{
rt_uint16_t addr;
rt_uint16_t flags;
struct rt_i2c_bus_device *bus = (struct rt_i2c_bus_device *)dev->user_data;
RT_ASSERT(bus != RT_NULL);
RT_ASSERT(buffer != RT_NULL);
LOG_D("I2C bus dev [%s] writing %u bytes.", dev->parent.name, count);
addr = pos & 0xffff;
flags = (pos >> 16) & 0xffff;
return rt_i2c_master_send(bus, addr, flags, (const rt_uint8_t *)buffer, count);
}
static rt_err_t i2c_bus_device_control(rt_device_t dev,
int cmd,
void *args)
{
rt_err_t ret;
struct rt_i2c_priv_data *priv_data;
struct rt_i2c_bus_device *bus = (struct rt_i2c_bus_device *)dev->user_data;
RT_ASSERT(bus != RT_NULL);
switch (cmd)
{
/* set 10-bit addr mode */
case RT_I2C_DEV_CTRL_10BIT:
bus->flags |= RT_I2C_ADDR_10BIT;
break;
case RT_I2C_DEV_CTRL_TIMEOUT:
bus->timeout = *(rt_uint32_t *)args;
break;
case RT_I2C_DEV_CTRL_RW:
priv_data = (struct rt_i2c_priv_data *)args;
ret = rt_i2c_transfer(bus, priv_data->msgs, priv_data->number);
if (ret < 0)
{
return -RT_EIO;
}
break;
default:
return rt_i2c_control(bus, cmd, args);
}
return RT_EOK;
}
#ifdef RT_USING_DEVICE_OPS
static const struct rt_device_ops i2c_ops =
{
RT_NULL,
RT_NULL,
RT_NULL,
i2c_bus_device_read,
i2c_bus_device_write,
i2c_bus_device_control
};
#endif
rt_err_t rt_i2c_bus_device_device_init(struct rt_i2c_bus_device *bus,
const char *name)
{
struct rt_device *device;
RT_ASSERT(bus != RT_NULL);
device = &bus->parent;
device->user_data = bus;
/* set device type */
device->type = RT_Device_Class_I2CBUS;
/* initialize device interface */
#ifdef RT_USING_DEVICE_OPS
device->ops = &i2c_ops;
#else
device->init = RT_NULL;
device->open = RT_NULL;
device->close = RT_NULL;
device->read = i2c_bus_device_read;
device->write = i2c_bus_device_write;
device->control = i2c_bus_device_control;
#endif
/* register to device manager */
rt_device_register(device, name, RT_DEVICE_FLAG_RDWR);
return RT_EOK;
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-06 GuEe-GUI first version
*/
#include <rtdevice.h>
#define DBG_TAG "dev.i2c.dm"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_OFW
static void i2c_parse_timing(struct rt_ofw_node *dev_np, const char *propname,
rt_uint32_t *out_value, rt_uint32_t def_value, rt_bool_t use_defaults)
{
if (rt_ofw_prop_read_u32(dev_np, propname, out_value) && use_defaults)
{
*out_value = def_value;
}
}
rt_err_t i2c_timings_ofw_parse(struct rt_ofw_node *dev_np, struct i2c_timings *timings,
rt_bool_t use_defaults)
{
rt_ubase_t def;
rt_bool_t udef = use_defaults;
struct i2c_timings *t = timings;
i2c_parse_timing(dev_np, "clock-frequency", &t->bus_freq_hz, I2C_MAX_STANDARD_MODE_FREQ, udef);
def = t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ ? 1000 : t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 300 : 120;
i2c_parse_timing(dev_np, "i2c-scl-rising-time-ns", &t->scl_rise_ns, def, udef);
def = t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 300 : 120;
i2c_parse_timing(dev_np, "i2c-scl-falling-time-ns", &t->scl_fall_ns, def, udef);
i2c_parse_timing(dev_np, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns, 0, udef);
i2c_parse_timing(dev_np, "i2c-sda-falling-time-ns", &t->sda_fall_ns, t->scl_fall_ns, udef);
i2c_parse_timing(dev_np, "i2c-sda-hold-time-ns", &t->sda_hold_ns, 0, udef);
i2c_parse_timing(dev_np, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns, 0, udef);
i2c_parse_timing(dev_np, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz, 0, udef);
return RT_EOK;
}
#endif /* RT_USING_OFW */
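A sketch of how a controller driver might consume these parsed timings in its probe path; the register programming is left as a comment because it is controller specific.
#ifdef RT_USING_OFW
static void demo_i2c_apply_timings(struct rt_ofw_node *np)
{
    struct i2c_timings t;

    /* fill every field, falling back to the default value when the
     * property is absent in the device tree node */
    i2c_timings_ofw_parse(np, &t, RT_TRUE);

    rt_kprintf("bus %u Hz, scl rise %u ns, sda hold %u ns\n",
               t.bus_freq_hz, t.scl_rise_ns, t.sda_hold_ns);

    /* ... derive the divider/filter register values from 't' here ... */
}
#endif /* RT_USING_OFW */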

View File

@@ -0,0 +1,274 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-30 sp-cai first version
*/
#include <rtdevice.h>
#ifdef RT_USING_SOFT_I2C
#if !defined(RT_USING_SOFT_I2C0) &&\
!defined(RT_USING_SOFT_I2C1) && !defined(RT_USING_SOFT_I2C2) &&\
!defined(RT_USING_SOFT_I2C3) && !defined(RT_USING_SOFT_I2C4) &&\
!defined(RT_USING_SOFT_I2C5) && !defined(RT_USING_SOFT_I2C6) &&\
!defined(RT_USING_SOFT_I2C7) && !defined(RT_USING_SOFT_I2C8)
#error "Please define at least one RT_USING_SOFT_I2Cx"
/*
This driver can be disabled at:
menuconfig -> RT-Thread Components -> Device Drivers -> Using I2C device drivers
*/
#endif
#define DBG_ENABLE
#define DBG_TAG "I2C_S"
#ifdef RT_I2C_BITOPS_DEBUG
#define DBG_LEVEL DBG_LOG
#endif
#include <rtdbg.h>
/* i2c config class */
struct soft_i2c_config
{
rt_base_t scl_pin;
rt_base_t sda_pin;
const char *bus_name;
rt_uint16_t timing_delay; /* scl and sda line delay */
rt_uint16_t timing_timeout; /* in tick */
};
/* i2c driver class */
struct rt_soft_i2c
{
struct rt_i2c_bus_device i2c_bus;
struct rt_i2c_bit_ops ops;
};
struct soft_i2c_config i2c_cfg[] =
{
#ifdef RT_USING_SOFT_I2C0
{
.scl_pin = RT_SOFT_I2C0_SCL_PIN,
.sda_pin = RT_SOFT_I2C0_SDA_PIN,
.bus_name = RT_SOFT_I2C0_BUS_NAME,
.timing_delay = RT_SOFT_I2C0_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C0_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C0
#ifdef RT_USING_SOFT_I2C1
{
.scl_pin = RT_SOFT_I2C1_SCL_PIN,
.sda_pin = RT_SOFT_I2C1_SDA_PIN,
.bus_name = RT_SOFT_I2C1_BUS_NAME,
.timing_delay = RT_SOFT_I2C1_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C1_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C1
#ifdef RT_USING_SOFT_I2C2
{
.scl_pin = RT_SOFT_I2C2_SCL_PIN,
.sda_pin = RT_SOFT_I2C2_SDA_PIN,
.bus_name = RT_SOFT_I2C2_BUS_NAME,
.timing_delay = RT_SOFT_I2C2_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C2_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C2
#ifdef RT_USING_SOFT_I2C3
{
.scl_pin = RT_SOFT_I2C3_SCL_PIN,
.sda_pin = RT_SOFT_I2C3_SDA_PIN,
.bus_name = RT_SOFT_I2C3_BUS_NAME,
.timing_delay = RT_SOFT_I2C3_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C3_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C3
#ifdef RT_USING_SOFT_I2C4
{
.scl_pin = RT_SOFT_I2C4_SCL_PIN,
.sda_pin = RT_SOFT_I2C4_SDA_PIN,
.bus_name = RT_SOFT_I2C4_BUS_NAME,
.timing_delay = RT_SOFT_I2C4_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C4_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C4
#ifdef RT_USING_SOFT_I2C5
{
.scl_pin = RT_SOFT_I2C5_SCL_PIN,
.sda_pin = RT_SOFT_I2C5_SDA_PIN,
.bus_name = RT_SOFT_I2C5_BUS_NAME,
.timing_delay = RT_SOFT_I2C5_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C5_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C5
#ifdef RT_USING_SOFT_I2C6
{
.scl_pin = RT_SOFT_I2C6_SCL_PIN,
.sda_pin = RT_SOFT_I2C6_SDA_PIN,
.bus_name = RT_SOFT_I2C6_BUS_NAME,
.timing_delay = RT_SOFT_I2C6_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C6_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C6
#ifdef RT_USING_SOFT_I2C7
{
.scl_pin = RT_SOFT_I2C7_SCL_PIN,
.sda_pin = RT_SOFT_I2C7_SDA_PIN,
.bus_name = RT_SOFT_I2C7_BUS_NAME,
.timing_delay = RT_SOFT_I2C7_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C7_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C7
#ifdef RT_USING_SOFT_I2C8
{
.scl_pin = RT_SOFT_I2C8_SCL_PIN,
.sda_pin = RT_SOFT_I2C8_SDA_PIN,
.bus_name = RT_SOFT_I2C8_BUS_NAME,
.timing_delay = RT_SOFT_I2C8_TIMING_DELAY,
.timing_timeout = RT_SOFT_I2C8_TIMING_TIMEOUT,
},
#endif //RT_USING_SOFT_I2C8
};
static struct rt_soft_i2c i2c_bus_obj[sizeof(i2c_cfg) / sizeof(i2c_cfg[0])] =
{ 0 };
/**
* This function initializes the i2c pin.
* @param i2c config class.
*/
static void pin_init(const struct soft_i2c_config *cfg)
{
rt_pin_mode(cfg->scl_pin, PIN_MODE_OUTPUT_OD);
rt_pin_mode(cfg->sda_pin, PIN_MODE_OUTPUT_OD);
rt_pin_write(cfg->scl_pin, PIN_HIGH);
rt_pin_write(cfg->sda_pin, PIN_HIGH);
}
/**
* This function sets the sda pin.
* @param i2c config class.
* @param The sda pin state.
*/
static void set_sda(void *cfg, rt_int32_t value)
{
rt_pin_write(((const struct soft_i2c_config*)cfg)->sda_pin, value);
}
/**
* This function sets the scl pin.
* @param i2c config class.
* @param The scl pin state.
*/
static void set_scl(void *cfg, rt_int32_t value)
{
rt_pin_write(((const struct soft_i2c_config*)cfg)->scl_pin, value);
}
/**
* This function gets the sda pin state.
* @param i2c config class.
*/
static rt_int32_t get_sda(void *cfg)
{
return rt_pin_read(((const struct soft_i2c_config*)cfg)->sda_pin);
}
/**
* This function gets the scl pin state.
* @param i2c config class.
*/
static rt_int32_t get_scl(void *cfg)
{
return rt_pin_read(((const struct soft_i2c_config*)cfg)->scl_pin);
}
static const struct rt_i2c_bit_ops soft_i2c_ops =
{
.set_sda = set_sda,
.set_scl = set_scl,
.get_sda = get_sda,
.get_scl = get_scl,
.udelay = rt_hw_us_delay,
};
/**
* If the I2C bus is stuck with SDA held low by a slave, toggle SCL to release it.
*
* @param i2c config class.
*
* @return RT_EOK indicates successful unlock.
*/
static rt_err_t i2c_bus_unlock(const struct soft_i2c_config *cfg)
{
rt_ubase_t i = 0;
if(PIN_LOW == rt_pin_read(cfg->sda_pin))
{
while(i++ < 9)
{
rt_pin_write(cfg->scl_pin, PIN_HIGH);
rt_hw_us_delay(cfg->timing_delay);
rt_pin_write(cfg->scl_pin, PIN_LOW);
rt_hw_us_delay(cfg->timing_delay);
}
}
if(PIN_LOW == rt_pin_read(cfg->sda_pin))
{
return -RT_ERROR;
}
return RT_EOK;
}
/* I2C initialization function */
int rt_soft_i2c_init(void)
{
int err = RT_EOK;
struct rt_soft_i2c *obj;
int i;
for(i = 0; i < sizeof(i2c_bus_obj) / sizeof(i2c_bus_obj[0]); i++)
{
struct soft_i2c_config *cfg = &i2c_cfg[i];
pin_init(cfg);
obj = &i2c_bus_obj[i];
obj->ops = soft_i2c_ops;
obj->ops.data = cfg;
obj->i2c_bus.priv = &obj->ops;
obj->ops.delay_us = cfg->timing_delay;
obj->ops.timeout = cfg->timing_timeout;
if(rt_i2c_bit_add_bus(&obj->i2c_bus, cfg->bus_name) == RT_EOK)
{
i2c_bus_unlock(cfg);
LOG_D("Software simulation %s init done"
", SCL pin: 0x%02X, SDA pin: 0x%02X"
, cfg->bus_name
, cfg->scl_pin
, cfg->sda_pin
);
}
else
{
err++;
LOG_E("Software simulation %s init fail"
", SCL pin: 0x%02X, SDA pin: 0x%02X"
, cfg->bus_name
, cfg->scl_pin
, cfg->sda_pin
);
}
}
return err;
}
INIT_PREV_EXPORT(rt_soft_i2c_init);
#endif // RT_USING_SOFT_I2C
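The RT_USING_SOFT_I2Cx blocks above rely on Kconfig emitting per-bus settings into rtconfig.h; only the macro names are fixed by the driver, the values in this sketch are illustrative.
#define RT_USING_SOFT_I2C0
#define RT_SOFT_I2C0_SCL_PIN        20      /* GPIO pin index */
#define RT_SOFT_I2C0_SDA_PIN        21
#define RT_SOFT_I2C0_BUS_NAME       "i2c0"
#define RT_SOFT_I2C0_TIMING_DELAY   10      /* us, SCL/SDA half-bit delay */
#define RT_SOFT_I2C0_TIMING_TIMEOUT 10      /* ticks, clock-stretch timeout */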

View File

@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_USING_DM']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../include']
src = ['iio.c',]
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
static void *ofw_iio_channel_get_by_index(struct rt_ofw_node *np, int index, int *out_channel)
{
void *iio = RT_NULL;
#ifdef RT_USING_OFW
struct rt_ofw_node *iio_np;
struct rt_ofw_cell_args iio_args;
if (!rt_ofw_parse_phandle_cells(np, "io-channels", "#io-channel-cells", index, &iio_args))
{
iio_np = iio_args.data;
if (!rt_ofw_data(iio_np))
{
rt_platform_ofw_request(iio_np);
}
iio = rt_ofw_data(iio_np);
rt_ofw_node_put(iio_np);
if (out_channel)
{
*out_channel = iio_args.args[0];
}
}
#endif /* RT_USING_OFW */
return iio;
}
void *rt_iio_channel_get_by_index(struct rt_device *dev, int index, int *out_channel)
{
void *iio = RT_NULL;
if (!dev || index < 0)
{
return RT_NULL;
}
if (dev->ofw_node)
{
iio = ofw_iio_channel_get_by_index(dev->ofw_node, index, out_channel);
}
return iio;
}
void *rt_iio_channel_get_by_name(struct rt_device *dev, const char *name, int *out_channel)
{
int index;
if (!dev || !name)
{
return RT_NULL;
}
index = rt_dm_dev_prop_index_of_string(dev, "io-channel-names", name);
return rt_iio_channel_get_by_index(dev, index, out_channel);
}
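A hedged consumer-side sketch: it assumes the providing ADC driver stored its rt_adc_device in the OFW node data (the usual pattern), so the opaque pointer can be used with the ADC API; the channel name "vbus" is hypothetical.
static void demo_read_vbus(struct rt_device *dev)
{
    int channel;
    void *provider = rt_iio_channel_get_by_name(dev, "vbus", &channel);

    if (provider)
    {
        /* assumption: the provider is an ADC device */
        rt_uint32_t raw = rt_adc_read((rt_adc_device_t)provider, channel);

        rt_kprintf("vbus raw sample: %u\n", raw);
    }
}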

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -14,12 +14,65 @@
#define __ADC_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup ADC ADC
*
* @brief ADC driver api
*
* <b>Example</b>
* @code {.c}
* #define ADC_DEV_NAME "adc1"
* #define ADC_DEV_CHANNEL 5
* #define REFER_VOLTAGE 330
* #define CONVERT_BITS (1 << 12)
*
* static int adc_vol_sample(int argc, char *argv[])
* {
* rt_adc_device_t adc_dev;
* rt_uint32_t value, vol;
*
* rt_err_t ret = RT_EOK;
*
* adc_dev = (rt_adc_device_t)rt_device_find(ADC_DEV_NAME);
* if (adc_dev == RT_NULL)
* {
* rt_kprintf("adc sample run failed! can't find %s device!\n", ADC_DEV_NAME);
* return -RT_ERROR;
* }
*
* ret = rt_adc_enable(adc_dev, ADC_DEV_CHANNEL);
*
* value = rt_adc_read(adc_dev, ADC_DEV_CHANNEL);
* rt_kprintf("the value is :%d \n", value);
*
* vol = value * REFER_VOLTAGE / CONVERT_BITS;
* rt_kprintf("the voltage is :%d.%02d \n", vol / 100, vol % 100);
*
* ret = rt_adc_disable(adc_dev, ADC_DEV_CHANNEL);
*
* return ret;
* }
* MSH_CMD_EXPORT(adc_vol_sample, adc voltage convert sample);
*
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup ADC
* @{
*/
#define RT_ADC_INTERN_CH_TEMPER (-1)
#define RT_ADC_INTERN_CH_VREF (-2)
#define RT_ADC_INTERN_CH_VBAT (-3)
struct rt_adc_device;
/**
* @brief Configure the adc device
*/
struct rt_adc_ops
{
rt_err_t (*enabled)(struct rt_adc_device *device, rt_int8_t channel, rt_bool_t enabled);
@@ -27,7 +80,9 @@ struct rt_adc_ops
rt_uint8_t (*get_resolution)(struct rt_adc_device *device);
rt_int16_t (*get_vref) (struct rt_adc_device *device);
};
/**
* @brief adc device
*/
struct rt_adc_device
{
struct rt_device parent;
@@ -43,10 +98,53 @@ typedef enum
RT_ADC_CMD_GET_VREF = RT_DEVICE_CTRL_BASE(ADC) + 4, /* get reference voltage */
} rt_adc_cmd_t;
/**
* @brief register the adc device
* @param adc adc device
* @param name device name
* @param ops device ops
* @param user_data device private data
* @return rt_err_t error code
* @ingroup ADC
*/
rt_err_t rt_hw_adc_register(rt_adc_device_t adc, const char *name, const struct rt_adc_ops *ops, const void *user_data);
/**
* @brief read the adc value
* @param dev adc device
* @param channel adc channel
* @return rt_uint32_t adc value
* @ingroup ADC
*/
rt_uint32_t rt_adc_read(rt_adc_device_t dev, rt_int8_t channel);
/**
* @brief enable the adc channel
* @param dev adc device
* @param channel adc channel
* @return rt_err_t error code
* @ingroup ADC
*/
rt_err_t rt_adc_enable(rt_adc_device_t dev, rt_int8_t channel);
/**
* @brief disable the adc channel
* @param dev adc device
* @param channel adc channel
* @return rt_err_t error code
* @ingroup ADC
*/
rt_err_t rt_adc_disable(rt_adc_device_t dev, rt_int8_t channel);
/**
* @brief read the voltage of an adc channel
* @param dev adc device
* @param channel adc channel
* @return rt_int16_t the converted voltage
* @ingroup ADC
*/
rt_int16_t rt_adc_voltage(rt_adc_device_t dev, rt_int8_t channel);
/*! @}*/
#endif /* __ADC_H__ */

View File

@@ -0,0 +1,397 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#ifndef __AHCI_H__
#define __AHCI_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/scsi.h>
#include <drivers/misc.h>
struct rt_ahci_ops;
/* Generic Host Control */
#define RT_AHCI_HBA_CAP 0x00 /* Host capability*/
#define RT_AHCI_CAP_NP RT_GENMASK(4, 0) /* Number of Ports */
#define RT_AHCI_CAP_NCS RT_GENMASK(12, 8) /* Number of Command Slots */
#define RT_AHCI_CAP_PSC RT_BIT(13) /* Partial State Capable */
#define RT_AHCI_CAP_SSC RT_BIT(14) /* Slumber capable */
#define RT_AHCI_CAP_PMD RT_BIT(15) /* PIO Multiple DRQ Block */
#define RT_AHCI_CAP_SPM RT_BIT(17) /* Port Multiplier */
#define RT_AHCI_CAP_AHCI RT_BIT(18) /* AHCI only */
#define RT_AHCI_CAP_SNZO RT_BIT(19) /* Non-Zero DMA Offsets */
#define RT_AHCI_CAP_ISS RT_GENMASK(23, 20) /* Interface Speed Support */
#define RT_AHCI_CAP_CLO RT_BIT(24) /* Command List Override support */
#define RT_AHCI_CAP_SAL RT_BIT(25) /* Activity LED */
#define RT_AHCI_CAP_SALP RT_BIT(26) /* Aggressive Link Power Management */
#define RT_AHCI_CAP_SSS RT_BIT(27) /* Staggered Spin-up */
#define RT_AHCI_CAP_SIS RT_BIT(28) /* Interlock Switch */
#define RT_AHCI_CAP_NCQ RT_BIT(30) /* Native Command Queueing */
#define RT_AHCI_CAP_64 RT_BIT(31) /* PCI DAC (64-bit DMA) support */
#define RT_AHCI_HBA_GHC 0x04 /* Global host control */
#define RT_AHCI_GHC_RESET RT_BIT(0) /* Reset controller; self-clear */
#define RT_AHCI_GHC_IRQ_EN RT_BIT(1) /* Global IRQ enable */
#define RT_AHCI_GHC_AHCI_EN RT_BIT(31) /* AHCI enabled */
#define RT_AHCI_HBA_INTS 0x08 /* Interrupt status */
#define RT_AHCI_HBA_PI 0x0c /* Port implemented */
#define RT_AHCI_HBA_VS 0x10 /* Version */
#define RT_AHCI_HBA_CCC_CTL 0x14 /* Command completion coalescing control */
#define RT_AHCI_HBA_CCC_PTS 0x18 /* Command completion coalescing ports */
#define RT_AHCI_HBA_EM_LOC 0x1c /* Enclosure management location */
#define RT_AHCI_HBA_EM_CTL 0x20 /* Enclosure management control */
#define RT_AHCI_HBA_CAP2 0x24 /* Host capabilities extended */
#define RT_AHCI_HBA_BOHC 0x28 /* BIOS/OS handoff control and status */
#define RT_AHCI_HBA_VENDOR 0xa0 /* Vendor specific registers (0xa0 - 0xff) */
#define RT_AHCI_PORT_CLB 0x00 /* Command list base address, 1K-byte aligned */
#define RT_AHCI_PORT_CLBU 0x04 /* Command list base address upper 32 bits */
#define RT_AHCI_PORT_FB 0x08 /* FIS base address, 256-byte aligned */
#define RT_AHCI_PORT_FBU 0x0C /* FIS base address upper 32 bits */
#define RT_AHCI_PORT_INTS 0x10 /* Interrupt status */
#define RT_AHCI_PORT_INTE 0x14 /* Interrupt enable */
#define RT_AHCI_PORT_INTE_D2H_REG_FIS RT_BIT(0) /* D2H Register FIS rx'd */
#define RT_AHCI_PORT_INTE_PIOS_FIS RT_BIT(1) /* PIO Setup FIS rx'd */
#define RT_AHCI_PORT_INTE_DMAS_FIS RT_BIT(2) /* DMA Setup FIS rx'd */
#define RT_AHCI_PORT_INTE_SDB_FIS RT_BIT(3) /* Set Device Bits FIS rx'd */
#define RT_AHCI_PORT_INTE_UNK_FIS RT_BIT(4) /* Unknown FIS rx'd */
#define RT_AHCI_PORT_INTE_SG_DONE RT_BIT(5) /* Descriptor processed */
#define RT_AHCI_PORT_INTE_CONNECT RT_BIT(6) /* Port connect change status */
#define RT_AHCI_PORT_INTE_DMPS RT_BIT(7) /* Mechanical presence status */
#define RT_AHCI_PORT_INTE_PHYRDY RT_BIT(22) /* PhyRdy changed */
#define RT_AHCI_PORT_INTE_BAD_PMP RT_BIT(23) /* Incorrect port multiplier */
#define RT_AHCI_PORT_INTE_OVERFLOW RT_BIT(24) /* Xfer exhausted available S/G */
#define RT_AHCI_PORT_INTE_IF_NONFATAL RT_BIT(26) /* Interface non-fatal error */
#define RT_AHCI_PORT_INTE_IF_ERR RT_BIT(27) /* Interface fatal error */
#define RT_AHCI_PORT_INTE_HBUS_DATA_ERR RT_BIT(28) /* Host bus data error */
#define RT_AHCI_PORT_INTE_HBUS_ERR RT_BIT(29) /* Host bus fatal error */
#define RT_AHCI_PORT_INTE_TF_ERR RT_BIT(30) /* Task file error */
#define RT_AHCI_PORT_INTE_COLD_PRES RT_BIT(31) /* Cold presence detect */
#define RT_AHCI_PORT_CMD 0x18 /* Command and status */
#define RT_AHCI_PORT_CMD_START RT_BIT(0) /* Enable port DMA engine */
#define RT_AHCI_PORT_CMD_SPIN_UP RT_BIT(1) /* Spin up device */
#define RT_AHCI_PORT_CMD_POWER_ON RT_BIT(2) /* Power up device */
#define RT_AHCI_PORT_CMD_CLO RT_BIT(3) /* Command list override */
#define RT_AHCI_PORT_CMD_FIS_RX RT_BIT(4) /* Enable FIS receive DMA engine */
#define RT_AHCI_PORT_CMD_FIS_ON RT_BIT(14) /* FIS DMA engine running */
#define RT_AHCI_PORT_CMD_LIST_ON RT_BIT(15) /* cmd list DMA engine running */
#define RT_AHCI_PORT_CMD_ATAPI RT_BIT(24) /* Device is ATAPI */
#define RT_AHCI_PORT_CMD_ACTIVE RT_BIT(28) /* Active state */
#define RT_AHCI_PORT_TFD 0x20 /* Task file data */
#define RT_AHCI_PORT_TFDATA_ERR RT_BIT(0) /* Indicates an error during the transfer */
#define RT_AHCI_PORT_TFDATA_DRQ RT_BIT(3) /* Indicates a data transfer is requested */
#define RT_AHCI_PORT_TFDATA_BSY RT_BIT(7) /* Indicates the interface is busy */
#define RT_AHCI_PORT_SIG 0x24 /* Signature */
#define RT_AHCI_PORT_SIG_REG_MASK 0xff
#define RT_AHCI_PORT_SIG_SECTOR_NR_SHIFT 0 /* Sector Count Register */
#define RT_AHCI_PORT_SIG_LBA_LOW_SHIFT 8 /* LBA Low Register */
#define RT_AHCI_PORT_SIG_LBA_MID_SHIFT 16 /* LBA Mid Register */
#define RT_AHCI_PORT_SIG_LBA_HIGH_SHIFT 24 /* LBA High Register */
#define RT_AHCI_PORT_SIG_SATA_CDROM 0xeb140101
#define RT_AHCI_PORT_SIG_SATA_DISK 0x00000101
#define RT_AHCI_PORT_SSTS 0x28 /* SATA status (SCR0:SStatus) */
#define RT_AHCI_PORT_SSTS_DET_MASK 0x3
#define RT_AHCI_PORT_SSTS_DET_COMINIT 0x1
#define RT_AHCI_PORT_SSTS_DET_PHYRDY 0x3
#define RT_AHCI_PORT_SCTL 0x2c /* SATA control (SCR2:SControl) */
#define RT_AHCI_PORT_SERR 0x30 /* SATA error (SCR1:SError) */
#define RT_AHCI_PORT_SERR_ERR_I RT_BIT(0) /* Recovered Data Integrity Error */
#define RT_AHCI_PORT_SERR_ERR_M RT_BIT(1) /* Recovered Communications Error */
#define RT_AHCI_PORT_SERR_ERR_T RT_BIT(8) /* Transient Data Integrity Error */
#define RT_AHCI_PORT_SERR_ERR_C RT_BIT(9) /* Persistent Communication or Data Integrity Error */
#define RT_AHCI_PORT_SERR_ERR_P RT_BIT(10) /* Protocol Error */
#define RT_AHCI_PORT_SERR_ERR_E RT_BIT(11) /* Internal Error */
#define RT_AHCI_PORT_SERR_DIAG_N RT_BIT(16) /* PhyRdy Change */
#define RT_AHCI_PORT_SERR_DIAG_I RT_BIT(17) /* Phy Internal Error */
#define RT_AHCI_PORT_SERR_DIAG_W RT_BIT(18) /* Comm Wake */
#define RT_AHCI_PORT_SERR_DIAG_B RT_BIT(19) /* 10B to 8B Decode Error */
#define RT_AHCI_PORT_SERR_DIAG_D RT_BIT(20) /* Disparity Error */
#define RT_AHCI_PORT_SERR_DIAG_C RT_BIT(21) /* CRC Error */
#define RT_AHCI_PORT_SERR_DIAG_H RT_BIT(22) /* Handshake Error */
#define RT_AHCI_PORT_SERR_DIAG_S RT_BIT(23) /* Link Sequence Error */
#define RT_AHCI_PORT_SERR_DIAG_T RT_BIT(24) /* Transport state transition error */
#define RT_AHCI_PORT_SERR_DIAG_F RT_BIT(25) /* Unknown FIS Type */
#define RT_AHCI_PORT_SERR_DIAG_X RT_BIT(26) /* Exchanged */
#define RT_AHCI_PORT_SACT 0x34 /* SATA active (SCR3:SActive) */
#define RT_AHCI_PORT_CI 0x38 /* Command issue */
#define RT_AHCI_PORT_SNTF 0x3c /* SATA notification (SCR4:SNotification) */
#define RT_AHCI_PORT_FBS 0x40 /* FIS-based switch control */
#define RT_AHCI_PORT_VENDOR 0x70 /* Vendor specific (0x70 - 0x7f) */
#define RT_AHCI_MAX_SG 56
#define RT_AHCI_CMD_SLOT_SIZE 32
#define RT_AHCI_MAX_CMD_SLOT 32
#define RT_AHCI_RX_FIS_SIZE 256
#define RT_AHCI_CMD_TBL_HDR 0x80
#define RT_AHCI_CMD_TBL_CDB 0x40
#define RT_AHCI_CMD_TBL_SIZE (RT_AHCI_CMD_TBL_HDR + (RT_AHCI_MAX_SG * 16))
#define RT_AHCI_DMA_SIZE (RT_AHCI_CMD_SLOT_SIZE * RT_AHCI_MAX_CMD_SLOT + RT_AHCI_CMD_TBL_SIZE + RT_AHCI_RX_FIS_SIZE)
#define RT_ACHI_PRDT_BYTES_MAX (4 * 1024 * 1024)
#define RT_AHCI_FIS_TYPE_REG_H2D 0x27 /* Register FIS - host to device */
#define RT_AHCI_FIS_TYPE_REG_D2H 0x34 /* Register FIS - device to host */
#define RT_AHCI_FIS_TYPE_DMA_ACT 0x39 /* DMA activate FIS - device to host */
#define RT_AHCI_FIS_TYPE_DMA_SETUP 0x41 /* DMA setup FIS - bidirectional */
#define RT_AHCI_FIS_TYPE_DATA 0x46 /* Data FIS - bidirectional */
#define RT_AHCI_FIS_TYPE_BIST 0x58 /* BIST activate FIS - bidirectional */
#define RT_AHCI_FIS_TYPE_PIO_SETUP 0x5f /* PIO setup FIS - device to host */
#define RT_AHCI_FIS_TYPE_DEV_BITS 0xa1 /* Set device bits FIS - device to host */
#define RT_AHCI_ATA_ID_WORDS 256
#define RT_AHCI_ATA_ID_CONFIG 0
#define RT_AHCI_ATA_ID_CYLS 1
#define RT_AHCI_ATA_ID_HEADS 3
#define RT_AHCI_ATA_ID_SECTORS 6
#define RT_AHCI_ATA_ID_SERNO 10
#define RT_AHCI_ATA_ID_BUF_SIZE 21
#define RT_AHCI_ATA_ID_FW_REV 23
#define RT_AHCI_ATA_ID_PROD 27
#define RT_AHCI_ATA_ID_MAX_MULTSECT 47
#define RT_AHCI_ATA_ID_DWORD_IO 48
#define RT_AHCI_ATA_ID_TRUSTED 48
#define RT_AHCI_ATA_ID_CAPABILITY 49
#define RT_AHCI_ATA_ID_OLD_PIO_MODES 51
#define RT_AHCI_ATA_ID_OLD_DMA_MODES 52
#define RT_AHCI_ATA_ID_FIELD_VALID 53
#define RT_AHCI_ATA_ID_CUR_CYLS 54
#define RT_AHCI_ATA_ID_CUR_HEADS 55
#define RT_AHCI_ATA_ID_CUR_SECTORS 56
#define RT_AHCI_ATA_ID_MULTSECT 59
#define RT_AHCI_ATA_ID_LBA_CAPACITY 60
#define RT_AHCI_ATA_ID_SWDMA_MODES 62
#define RT_AHCI_ATA_ID_MWDMA_MODES 63
#define RT_AHCI_ATA_ID_PIO_MODES 64
#define RT_AHCI_ATA_ID_EIDE_DMA_MIN 65
#define RT_AHCI_ATA_ID_EIDE_DMA_TIME 66
#define RT_AHCI_ATA_ID_EIDE_PIO 67
#define RT_AHCI_ATA_ID_EIDE_PIO_IORDY 68
#define RT_AHCI_ATA_ID_ADDITIONAL_SUPP 69
#define RT_AHCI_ATA_ID_QUEUE_DEPTH 75
#define RT_AHCI_ATA_ID_SATA_CAPABILITY 76
#define RT_AHCI_ATA_ID_SATA_CAPABILITY_2 77
#define RT_AHCI_ATA_ID_FEATURE_SUPP 78
#define RT_AHCI_ATA_ID_MAJOR_VER 80
#define RT_AHCI_ATA_ID_COMMAND_SET_1 82
#define RT_AHCI_ATA_ID_COMMAND_SET_2 83
#define RT_AHCI_ATA_ID_CFSSE 84
#define RT_AHCI_ATA_ID_CFS_ENABLE_1 85
#define RT_AHCI_ATA_ID_CFS_ENABLE_2 86
#define RT_AHCI_ATA_ID_CSF_DEFAULT 87
#define RT_AHCI_ATA_ID_UDMA_MODES 88
#define RT_AHCI_ATA_ID_HW_CONFIG 93
#define RT_AHCI_ATA_ID_SPG 98
#define RT_AHCI_ATA_ID_LBA_CAPACITY_2 100
#define RT_AHCI_ATA_ID_SECTOR_SIZE 106
#define RT_AHCI_ATA_ID_WWN 108
#define RT_AHCI_ATA_ID_LOGICAL_SECTOR_SIZE 117
#define RT_AHCI_ATA_ID_COMMAND_SET_3 119
#define RT_AHCI_ATA_ID_COMMAND_SET_4 120
#define RT_AHCI_ATA_ID_LAST_LUN 126
#define RT_AHCI_ATA_ID_DLF 128
#define RT_AHCI_ATA_ID_CSFO 129
#define RT_AHCI_ATA_ID_CFA_POWER 160
#define RT_AHCI_ATA_ID_CFA_KEY_MGMT 162
#define RT_AHCI_ATA_ID_CFA_MODES 163
#define RT_AHCI_ATA_ID_DATA_SET_MGMT 169
#define RT_AHCI_ATA_ID_SCT_CMD_XPORT 206
#define RT_AHCI_ATA_ID_ROT_SPEED 217
#define RT_AHCI_ATA_ID_PIO4 (1 << 1)
#define RT_AHCI_ATA_ID_SERNO_LEN 20
#define RT_AHCI_ATA_ID_FW_REV_LEN 8
#define RT_AHCI_ATA_ID_PROD_LEN 40
#define RT_AHCI_ATA_ID_WWN_LEN 8
#define RT_AHCI_ATA_CMD_DSM 0x06
#define RT_AHCI_ATA_CMD_DEV_RESET 0x08 /* ATAPI device reset */
#define RT_AHCI_ATA_CMD_PIO_READ 0x20 /* Read sectors with retry */
#define RT_AHCI_ATA_CMD_PIO_READ_EXT 0x24
#define RT_AHCI_ATA_CMD_READ_EXT 0x25
#define RT_AHCI_ATA_CMD_READ_NATIVE_MAX_EXT 0x27
#define RT_AHCI_ATA_CMD_READ_MULTI_EXT 0x29
#define RT_AHCI_ATA_CMD_READ_LOG_EXT 0x2f
#define RT_AHCI_ATA_CMD_PIO_WRITE 0x30 /* Write sectors with retry */
#define RT_AHCI_ATA_CMD_PIO_WRITE_EXT 0x34
#define RT_AHCI_ATA_CMD_WRITE_EXT 0x35
#define RT_AHCI_ATA_CMD_SET_MAX_EXT 0x37
#define RT_AHCI_ATA_CMD_WRITE_MULTI_EXT 0x39
#define RT_AHCI_ATA_CMD_WRITE_FUA_EXT 0x3d
#define RT_AHCI_ATA_CMD_VERIFY 0x40 /* Read verify sectors with retry */
#define RT_AHCI_ATA_CMD_VERIFY_EXT 0x42
#define RT_AHCI_ATA_CMD_FPDMA_READ 0x60
#define RT_AHCI_ATA_CMD_FPDMA_WRITE 0x61
#define RT_AHCI_ATA_CMD_EDD 0x90 /* Execute device diagnostic */
#define RT_AHCI_ATA_CMD_INIT_DEV_PARAMS 0x91 /* Initialize device parameters */
#define RT_AHCI_ATA_CMD_PACKET 0xa0 /* ATAPI packet */
#define RT_AHCI_ATA_CMD_ID_ATAPI 0xa1 /* ATAPI identify device */
#define RT_AHCI_ATA_CMD_CONF_OVERLAY 0xb1
#define RT_AHCI_ATA_CMD_READ_MULTI 0xc4 /* Read multiple */
#define RT_AHCI_ATA_CMD_WRITE_MULTI 0xc5 /* Write multiple */
#define RT_AHCI_ATA_CMD_SET_MULTI 0xc6 /* Set multiple mode */
#define RT_AHCI_ATA_CMD_READ 0xc8 /* Read DMA with retry */
#define RT_AHCI_ATA_CMD_WRITE 0xca /* Write DMA with retry */
#define RT_AHCI_ATA_CMD_WRITE_MULTI_FUA_EXT 0xce
#define RT_AHCI_ATA_CMD_STANDBYNOW1 0xe0 /* Standby immediate */
#define RT_AHCI_ATA_CMD_IDLEIMMEDIATE 0xe1 /* Idle immediate */
#define RT_AHCI_ATA_CMD_STANDBY 0xe2 /* Place in standby power mode */
#define RT_AHCI_ATA_CMD_IDLE 0xe3 /* Place in idle power mode */
#define RT_AHCI_ATA_CMD_PMP_READ 0xe4 /* Read buffer */
#define RT_AHCI_ATA_CMD_CHK_POWER 0xe5 /* Check power mode */
#define RT_AHCI_ATA_CMD_SLEEP 0xe6 /* Sleep */
#define RT_AHCI_ATA_CMD_FLUSH 0xe7
#define RT_AHCI_ATA_CMD_PMP_WRITE 0xe8 /* Write buffer */
#define RT_AHCI_ATA_CMD_FLUSH_EXT 0xea
#define RT_AHCI_ATA_CMD_ID_ATA 0xec /* Identify device */
#define RT_AHCI_ATA_CMD_SET_FEATURES 0xef /* Set features */
#define RT_AHCI_ATA_CMD_SEC_FREEZE_LOCK 0xf5 /* Security freeze */
#define RT_AHCI_ATA_CMD_READ_NATIVE_MAX 0xf8
#define RT_AHCI_ATA_CMD_SET_MAX 0xf9
#define RT_AHCI_ATA_DSM_TRIM 0x01
#define RT_AHCI_ATA_PROT_FLAG_PIO RT_BIT(0)
#define RT_AHCI_ATA_PROT_FLAG_DMA RT_BIT(1)
#define RT_AHCI_ATA_PROT_FLAG_NCQ RT_BIT(2)
#define RT_AHCI_ATA_PROT_FLAG_ATAPI RT_BIT(3)
#define rt_ahci_ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
#define rt_ahci_ata_id_has_lba(id) ((id)[49] & (1 << 9))
#define rt_ahci_ata_id_has_dma(id) ((id)[49] & (1 << 8))
#define rt_ahci_ata_id_has_ncq(id) ((id)[76] & (1 << 8))
#define rt_ahci_ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
#define rt_ahci_ata_id_removeable(id) ((id)[0] & (1 << 7))
#define rt_ahci_ata_id_iordy_disable(id) ((id)[49] & (1 << 10))
#define rt_ahci_ata_id_has_iordy(id) ((id)[49] & (1 << 11))
#define rt_ahci_ata_id_u32(id, n) (((rt_uint32_t)(id)[(n) + 1] << 16) | ((rt_uint32_t) (id)[(n)]))
#define rt_ahci_ata_id_u64(id, n) (((rt_uint64_t)(id)[(n) + 3] << 48) | ((rt_uint64_t)(id)[(n) + 2] << 32) | \
((rt_uint64_t)(id)[(n) + 1] << 16) | ((rt_uint64_t)(id)[(n) + 0]) )
rt_inline rt_bool_t rt_ahci_ata_id_has_lba48(const rt_uint16_t *id)
{
if ((id[RT_AHCI_ATA_ID_COMMAND_SET_2] & 0xc000) != 0x4000 ||
!rt_ahci_ata_id_u64(id, RT_AHCI_ATA_ID_LBA_CAPACITY_2))
{
return 0;
}
return !!(id[RT_AHCI_ATA_ID_COMMAND_SET_2] & (1 << 10));
}
rt_inline rt_uint64_t rt_ahci_ata_id_n_sectors(rt_uint16_t *id)
{
if (rt_ahci_ata_id_has_lba(id))
{
if (rt_ahci_ata_id_has_lba48(id))
{
return rt_ahci_ata_id_u64(id, RT_AHCI_ATA_ID_LBA_CAPACITY_2);
}
return rt_ahci_ata_id_u32(id, RT_AHCI_ATA_ID_LBA_CAPACITY);
}
return 0;
}
rt_inline rt_bool_t rt_ahci_ata_id_wcache_enabled(const rt_uint16_t *id)
{
if ((id[RT_AHCI_ATA_ID_CSF_DEFAULT] & 0xc000) != 0x4000)
{
return RT_FALSE;
}
return id[RT_AHCI_ATA_ID_CFS_ENABLE_1] & (1 << 5);
}
rt_inline rt_bool_t rt_ahci_ata_id_has_flush(const rt_uint16_t *id)
{
if ((id[RT_AHCI_ATA_ID_COMMAND_SET_2] & 0xc000) != 0x4000)
{
return RT_FALSE;
}
return id[RT_AHCI_ATA_ID_COMMAND_SET_2] & (1 << 12);
}
rt_inline rt_bool_t rt_ahci_ata_id_has_flush_ext(const rt_uint16_t *id)
{
if ((id[RT_AHCI_ATA_ID_COMMAND_SET_2] & 0xc000) != 0x4000)
{
return RT_FALSE;
}
return id[RT_AHCI_ATA_ID_COMMAND_SET_2] & (1 << 13);
}
struct rt_ahci_cmd_hdr
{
rt_uint32_t opts;
rt_uint32_t status;
rt_uint32_t tbl_addr_lo;
rt_uint32_t tbl_addr_hi;
rt_uint32_t reserved[4];
};
struct rt_ahci_sg
{
rt_uint32_t addr_lo;
rt_uint32_t addr_hi;
rt_uint32_t reserved;
rt_uint32_t flags_size;
};
struct rt_ahci_port
{
void *regs;
void *dma;
rt_ubase_t dma_handle;
struct rt_ahci_cmd_hdr *cmd_slot;
struct rt_ahci_sg *cmd_tbl_sg;
void *cmd_tbl;
rt_ubase_t cmd_tbl_dma;
void *rx_fis;
rt_uint32_t int_enabled;
rt_size_t block_size;
rt_uint16_t *ataid;
rt_bool_t link;
struct rt_completion done;
};
struct rt_ahci_host
{
struct rt_scsi_host parent;
int irq;
void *regs;
rt_size_t ports_nr;
rt_uint32_t ports_map;
struct rt_ahci_port ports[32];
rt_uint32_t cap;
rt_uint32_t max_blocks;
const struct rt_ahci_ops *ops;
};
struct rt_ahci_ops
{
rt_err_t (*host_init)(struct rt_ahci_host *host);
rt_err_t (*port_init)(struct rt_ahci_host *host, struct rt_ahci_port *port);
rt_err_t (*port_link_up)(struct rt_ahci_host *host, struct rt_ahci_port *port);
rt_err_t (*port_dma_init)(struct rt_ahci_host *host, struct rt_ahci_port *port);
rt_err_t (*port_isr)(struct rt_ahci_host *host, struct rt_ahci_port *port, rt_uint32_t isr);
};
rt_err_t rt_ahci_host_register(struct rt_ahci_host *host);
rt_err_t rt_ahci_host_unregister(struct rt_ahci_host *host);
#endif /* __AHCI_H__ */
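To tie the pieces together, a rough outline of what platform glue has to provide before calling rt_ahci_host_register(): the mapped register base, the interrupt number and, if the controller needs quirks, an rt_ahci_ops; the assumption here is that the embedded rt_scsi_host carries the owning device pointer used for DMA allocations.
static rt_err_t demo_ahci_attach(struct rt_device *dev, void *regs, int irq)
{
    static struct rt_ahci_host host;

    host.parent.dev = dev;     /* owner device, assumed field of rt_scsi_host */
    host.regs       = regs;    /* AHCI MMIO base, already ioremap'ed */
    host.irq        = irq;
    host.ops        = RT_NULL; /* no controller-specific quirks */

    return rt_ahci_host_register(&host);
}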

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_H__
#define __BLK_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/classes/block.h>
struct rt_dm_ida;
struct rt_blk_device;
struct rt_blk_disk_ops;
struct rt_blk_disk
{
struct rt_device parent;
const struct rt_blk_disk_ops *ops;
#ifdef RT_USING_DM
struct rt_dm_ida *ida;
#endif
rt_uint32_t read_only:1;
rt_uint32_t parallel_io:1;
rt_uint32_t removable:1;
#define RT_BLK_DISK_MAGIC 0xbdaabdaa
rt_uint32_t __magic;
rt_uint32_t partitions;
#define RT_BLK_PARTITION_NONE (-1)
#define RT_BLK_PARTITION_MAX (RT_UINT32_MAX >> 1)
rt_int32_t max_partitions;
rt_list_t part_nodes;
struct rt_spinlock lock;
struct rt_semaphore usr_lock;
};
struct rt_blk_disk_ops
{
rt_ssize_t (*read)(struct rt_blk_disk *disk, rt_off_t sector, void *buffer,
rt_size_t sector_count);
rt_ssize_t (*write)(struct rt_blk_disk *disk, rt_off_t sector, const void *buffer,
rt_size_t sector_count);
rt_err_t (*getgeome)(struct rt_blk_disk *disk, struct rt_device_blk_geometry *geometry);
rt_err_t (*sync)(struct rt_blk_disk *disk);
rt_err_t (*erase)(struct rt_blk_disk *disk);
rt_err_t (*autorefresh)(struct rt_blk_disk *disk, rt_bool_t is_auto);
rt_err_t (*control)(struct rt_blk_disk *disk, struct rt_blk_device *blk, int cmd, void *args);
};
#ifndef __DFS_H__
#include <dfs_fs.h>
struct rt_blk_device
{
struct rt_device parent;
int partno;
struct dfs_partition partition;
rt_list_t list;
struct rt_blk_disk *disk;
rt_size_t sector_start;
rt_size_t sector_count;
};
#else
struct rt_blk_device;
#endif /* __DFS_H__ */
rt_err_t rt_hw_blk_disk_register(struct rt_blk_disk *disk);
rt_err_t rt_hw_blk_disk_unregister(struct rt_blk_disk *disk);
rt_err_t rt_blk_disk_probe_partition(struct rt_blk_disk *disk);
rt_ssize_t rt_blk_disk_get_capacity(struct rt_blk_disk *disk);
rt_ssize_t rt_blk_disk_get_logical_block_size(struct rt_blk_disk *disk);
#endif /* __BLK_H__ */
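A minimal sketch of the ops a disk driver supplies, backed here by a small RAM array so the data path is obvious; sizes and names are illustrative, and a real driver would also fill the disk's device name and partition limit before calling rt_hw_blk_disk_register().
#define DEMO_SECTOR_SIZE 512
#define DEMO_SECTOR_NR   1024
static rt_uint8_t demo_media[DEMO_SECTOR_NR * DEMO_SECTOR_SIZE];

static rt_ssize_t demo_read(struct rt_blk_disk *disk, rt_off_t sector,
                            void *buffer, rt_size_t count)
{
    rt_memcpy(buffer, &demo_media[sector * DEMO_SECTOR_SIZE],
              count * DEMO_SECTOR_SIZE);
    return count;
}

static rt_ssize_t demo_write(struct rt_blk_disk *disk, rt_off_t sector,
                             const void *buffer, rt_size_t count)
{
    rt_memcpy(&demo_media[sector * DEMO_SECTOR_SIZE], buffer,
              count * DEMO_SECTOR_SIZE);
    return count;
}

static rt_err_t demo_getgeome(struct rt_blk_disk *disk,
                              struct rt_device_blk_geometry *geometry)
{
    geometry->bytes_per_sector = DEMO_SECTOR_SIZE;
    geometry->block_size       = DEMO_SECTOR_SIZE;
    geometry->sector_count     = DEMO_SECTOR_NR;
    return RT_EOK;
}

static const struct rt_blk_disk_ops demo_disk_ops =
{
    .read     = demo_read,
    .write    = demo_write,
    .getgeome = demo_getgeome,
};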

View File

@@ -16,6 +16,8 @@
#include <ref.h>
#include <drivers/ofw.h>
#define RT_CLK_NODE_OBJ_NAME "CLKNP"
struct rt_clk_ops;
struct rt_reset_control_node;
@@ -37,6 +39,8 @@ struct rt_clk_node
* };
* We assume the 'N' is the max value of element in 'clock-indices' if OFW.
*/
struct rt_object rt_parent;
rt_list_t list;
rt_list_t children_nodes;
@@ -74,6 +78,8 @@ struct rt_clk
const char *con_id;
rt_ubase_t rate;
int prepare_count;
int enable_count;
void *fw_node;
void *priv;

View File

@@ -13,9 +13,11 @@
#include <rthw.h>
#include <rtdef.h>
#include <bitmap.h>
#include <ioremap.h>
#include <drivers/misc.h>
#include <drivers/byteorder.h>
#include <drivers/core/master_id.h>
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
@@ -27,6 +29,24 @@ extern int rt_hw_cpu_id(void);
void rt_dm_secondary_cpu_init(void);
/* ID Allocation */
struct rt_dm_ida
{
rt_uint8_t master_id;
#define RT_DM_IDA_NUM 256
RT_BITMAP_DECLARE(map, RT_DM_IDA_NUM);
struct rt_spinlock lock;
};
#define RT_DM_IDA_INIT(id) { .master_id = MASTER_ID_##id }
int rt_dm_ida_alloc(struct rt_dm_ida *ida);
rt_bool_t rt_dm_ida_take(struct rt_dm_ida *ida, int id);
void rt_dm_ida_free(struct rt_dm_ida *ida, int id);
rt_device_t rt_dm_device_find(int master_id, int device_id);
int rt_dm_dev_set_name_auto(rt_device_t dev, const char *prefix);
int rt_dm_dev_get_name_id(rt_device_t dev);

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-04-20 GuEe-GUI the first version
*/
#ifndef __RT_DM_MASTER_ID_H__
#define __RT_DM_MASTER_ID_H__
#define MASTER_ID_CUSTOM 0
/* Block */
#define MASTER_ID_NVME 1
#define MASTER_ID_SCSI_SD 2
#define MASTER_ID_SCSI_CDROM 3
#define MASTER_ID_SDIO 4
#define MASTER_ID_VIRTUAL_BLOCK 5
/* Char */
#define MASTER_ID_RPMSG_EPT 11
#define MASTER_ID_RPMSG_CHAR 12
#define MASTER_ID_SERIAL 13
/* Clock Timer */
#define MASTER_ID_HWTIMER 21
#define MASTER_ID_PTP 22
#define MASTER_ID_RTC 23
/* Graphic Display */
#define MASTER_ID_GRAPHIC_BACKLIGHT 31
#define MASTER_ID_GRAPHIC_FRAMEBUFFER 32
#define MASTER_ID_LED 33
/* Hardware Monitor */
#define MASTER_ID_DVFS 41
#define MASTER_ID_SENSOR 42
#define MASTER_ID_THERMAL 43
#define MASTER_ID_WATCHDOG 44
/* I2C */
#define MASTER_ID_I2C_BUS 51
#define MASTER_ID_I2C_DEV 52
/* IO Control */
#define MASTER_ID_ADC 61
#define MASTER_ID_DAC 62
#define MASTER_ID_PIN 63
#define MASTER_ID_PWM 64
/* Memory */
#define MASTER_ID_MEM 71
#define MASTER_ID_MTD 72
/* MISC */
#define MASTER_ID_MISC 81
/* Multimedia */
#define MASTER_ID_AUDIO 91
/* Net */
#define MASTER_ID_CAN 101
#define MASTER_ID_ETH 102
#define MASTER_ID_PHY 103
#define MASTER_ID_WLAN 104
/* Input */
#define MASTER_ID_INPUT 111
#define MASTER_ID_TOUCH 112
/* Security */
#define MASTER_ID_HWCRYPTO 121
#define MASTER_ID_RNG 122
#define MASTER_ID_TEE 123
/* SPI */
#define MASTER_ID_SPI_BUS 131
#define MASTER_ID_SPI_DEV 132
/* TTY */
#define MASTER_ID_TTY 141
#define MASTER_ID_TTY_SLAVES 142
#define MASTER_ID_TTY_ALTERNATE 143
#define MASTER_ID_PTMX 144
/* USB */
#define MASTER_ID_USB_DEV 151
#define MASTER_ID_USB_BUS 152
#define MASTER_ID_USB_OTG 153
#endif /* __RT_DM_MASTER_ID_H__ */
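As a usage note, the ID allocator declared in dm.h pairs with this table: a class driver keeps one rt_dm_ida per MASTER_ID_* entry and pulls sequential instance numbers from it. A sketch (assuming the allocator returns a non-negative id on success):
static struct rt_dm_ida sd_ida = RT_DM_IDA_INIT(SCSI_SD);

static void demo_ida_usage(void)
{
    int id = rt_dm_ida_alloc(&sd_ida); /* 0, 1, 2, ... */

    if (id >= 0)
    {
        rt_kprintf("would register disk sd%d\n", id);
        rt_dm_ida_free(&sd_ida, id);   /* release when the device goes away */
    }
}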

View File

@@ -31,4 +31,8 @@ uint64_t clock_cpu_millisecond(uint64_t cpu_tick);
int clock_cpu_setops(const struct rt_clock_cputime_ops *ops);
#ifdef RT_USING_CPUTIME_RISCV
int riscv_cputime_init(void);
#endif /* RT_USING_CPUTIME_RISCV */
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -11,8 +11,66 @@
#ifndef __DAC_H__
#define __DAC_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup DAC DAC
*
* @brief DAC driver api
*
* <b>Example</b>
* @code {.c}
*
* #include <rtthread.h>
* #include <rtdevice.h>
* #include <stdlib.h>
* #define DAC_DEV_NAME "dac1"
* #define DAC_DEV_CHANNEL 1
* #define REFER_VOLTAGE 330
* #define CONVERT_BITS (1 << 12)
*
* static int dac_vol_sample(int argc, char *argv[])
* {
* rt_dac_device_t dac_dev;
* rt_uint32_t value, vol;
* rt_err_t ret = RT_EOK;
*
* dac_dev = (rt_dac_device_t)rt_device_find(DAC_DEV_NAME);
* if (dac_dev == RT_NULL)
* {
* rt_kprintf("dac sample run failed! can't find %s device!\n", DAC_DEV_NAME);
* return -RT_ERROR;
* }
*
* ret = rt_dac_enable(dac_dev, DAC_DEV_CHANNEL);
*
* value = atoi(argv[1]);
* rt_dac_write(dac_dev, DAC_DEV_CHANNEL, value);
* rt_kprintf("the value is :%d \n", value);
*
* vol = value * REFER_VOLTAGE / CONVERT_BITS;
* rt_kprintf("the voltage is :%d.%02d \n", vol / 100, vol % 100);
*
* rt_thread_mdelay(500);
*
* ret = rt_dac_disable(dac_dev, DAC_DEV_CHANNEL);
*
* return ret;
* }
* MSH_CMD_EXPORT(dac_vol_sample, dac voltage convert sample);
*
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup DAC
* @{
*/
struct rt_dac_device;
/**
* @brief Configuration of DAC device
*/
struct rt_dac_ops
{
rt_err_t (*disabled)(struct rt_dac_device *device, rt_uint32_t channel);
@@ -20,7 +78,10 @@ struct rt_dac_ops
rt_err_t (*convert)(struct rt_dac_device *device, rt_uint32_t channel, rt_uint32_t *value);
rt_uint8_t (*get_resolution)(struct rt_dac_device *device);
};
/**
* @brief DAC device structure
*
*/
struct rt_dac_device
{
struct rt_device parent;
@@ -35,10 +96,41 @@ typedef enum
RT_DAC_CMD_GET_RESOLUTION = RT_DEVICE_CTRL_BASE(DAC) + 2,
} rt_dac_cmd_t;
/**
* @brief Register a DAC device
* @param dac DAC device
* @param name DAC name
* @param ops the operations of DAC device
* @param user_data device private data
* @return rt_err_t error code
*/
rt_err_t rt_hw_dac_register(rt_dac_device_t dac, const char *name, const struct rt_dac_ops *ops, const void *user_data);
/**
* @brief set the value of DAC
* @param dev DAC device
* @param channel DAC channel
* @param value the value of DAC
* @return rt_err_t error code
*/
rt_err_t rt_dac_write(rt_dac_device_t dev, rt_uint32_t channel, rt_uint32_t value);
/**
* @brief enable the DAC channel
* @param dev DAC device
* @param channel DAC channel
* @return rt_err_t error code
*/
rt_err_t rt_dac_enable(rt_dac_device_t dev, rt_uint32_t channel);
/**
* @brief disable the DAC channel
* @param dev DAC device
* @param channel DAC channel
* @return rt_err_t error code
*/
rt_err_t rt_dac_disable(rt_dac_device_t dev, rt_uint32_t channel);
/*! @}*/
#endif /* __DAC_H__ */

View File

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-10-27 heyuanjie87 first version.
* 2013-05-17 aozima initial alarm event & mutex in system init.
* 2020-10-15 zhangsz add alarm flags hour minute second.
*/
#ifndef __DEV_ALARM_H__
#define __DEV_ALARM_H__
#include <sys/time.h>
#include <rtdef.h>
#define RT_ALARM_TM_NOW -1 /* set the alarm tm_mday, tm_mon, tm_sec, etc.
to now; we also call it the "don't care" value */
/* alarm flags */
#define RT_ALARM_ONESHOT 0x000 /* only alarm once */
#define RT_ALARM_DAILY 0x100 /* alarm everyday */
#define RT_ALARM_WEEKLY 0x200 /* alarm weekly at Monday or Friday etc. */
#define RT_ALARM_MONTHLY 0x400 /* alarm monthly at someday */
#define RT_ALARM_YAERLY 0x800 /* alarm yearly at a certain date */
#define RT_ALARM_HOUR 0x1000 /* alarm each hour at a certain min:second */
#define RT_ALARM_MINUTE 0x2000 /* alarm each minute at a certain second */
#define RT_ALARM_SECOND 0x4000 /* alarm each second */
#define RT_ALARM_STATE_INITED 0x02
#define RT_ALARM_STATE_START 0x01
#define RT_ALARM_STATE_STOP 0x00
/* alarm control cmd */
#define RT_ALARM_CTRL_MODIFY 1 /* modify alarm time or alarm flag */
typedef struct rt_alarm *rt_alarm_t;
typedef void (*rt_alarm_callback_t)(rt_alarm_t alarm, time_t timestamp);
struct rt_alarm
{
rt_list_t list;
rt_uint32_t flag;
rt_alarm_callback_t callback;
struct tm wktime;
void *user_data;
};
struct rt_alarm_setup
{
rt_uint32_t flag; /* alarm flag */
struct tm wktime; /* when will the alarm wake up user */
};
struct rt_alarm_container
{
rt_list_t head;
struct rt_mutex mutex;
struct rt_event event;
struct rt_alarm *current;
};
rt_alarm_t rt_alarm_create(rt_alarm_callback_t callback,
struct rt_alarm_setup *setup);
rt_err_t rt_alarm_control(rt_alarm_t alarm, int cmd, void *arg);
void rt_alarm_update(rt_device_t dev, rt_uint32_t event);
rt_err_t rt_alarm_delete(rt_alarm_t alarm);
rt_err_t rt_alarm_start(rt_alarm_t alarm);
rt_err_t rt_alarm_stop(rt_alarm_t alarm);
int rt_alarm_system_init(void);
#endif /* __DEV_ALARM_H__ */
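A sketch of a daily 07:30 alarm built on this API; it assumes an RTC device is registered and rt_alarm_system_init() has run, and the callback body is a placeholder.
static void demo_alarm_cb(rt_alarm_t alarm, time_t timestamp)
{
    rt_kprintf("alarm fired, timestamp = %d\n", (int)timestamp);
}

static void demo_alarm_setup(void)
{
    struct rt_alarm_setup setup;
    rt_alarm_t alarm;

    rt_memset(&setup, 0, sizeof(setup));
    setup.flag           = RT_ALARM_DAILY;
    setup.wktime.tm_year = RT_ALARM_TM_NOW; /* "don't care" fields */
    setup.wktime.tm_mon  = RT_ALARM_TM_NOW;
    setup.wktime.tm_mday = RT_ALARM_TM_NOW;
    setup.wktime.tm_hour = 7;
    setup.wktime.tm_min  = 30;
    setup.wktime.tm_sec  = 0;

    alarm = rt_alarm_create(demo_alarm_cb, &setup);
    if (alarm != RT_NULL)
    {
        rt_alarm_start(alarm);
    }
}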

View File

@@ -0,0 +1,176 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017-05-09 Urey first version
* 2019-07-09 Zero-Free improve device ops interface and data flows
*
*/
#ifndef __DEV_AUDIO_H__
#define __DEV_AUDIO_H__
#include "dev_audio_pipe.h"
/* AUDIO command */
#define _AUDIO_CTL(a) (RT_DEVICE_CTRL_BASE(Sound) + a)
#define AUDIO_CTL_GETCAPS _AUDIO_CTL(1)
#define AUDIO_CTL_CONFIGURE _AUDIO_CTL(2)
#define AUDIO_CTL_START _AUDIO_CTL(3)
#define AUDIO_CTL_STOP _AUDIO_CTL(4)
#define AUDIO_CTL_GETBUFFERINFO _AUDIO_CTL(5)
/* Audio Device Types */
#define AUDIO_TYPE_QUERY 0x00
#define AUDIO_TYPE_INPUT 0x01
#define AUDIO_TYPE_OUTPUT 0x02
#define AUDIO_TYPE_MIXER 0x04
/* Supported Sampling Rates */
#define AUDIO_SAMP_RATE_8K 0x0001
#define AUDIO_SAMP_RATE_11K 0x0002
#define AUDIO_SAMP_RATE_16K 0x0004
#define AUDIO_SAMP_RATE_22K 0x0008
#define AUDIO_SAMP_RATE_32K 0x0010
#define AUDIO_SAMP_RATE_44K 0x0020
#define AUDIO_SAMP_RATE_48K 0x0040
#define AUDIO_SAMP_RATE_96K 0x0080
#define AUDIO_SAMP_RATE_128K 0x0100
#define AUDIO_SAMP_RATE_160K 0x0200
#define AUDIO_SAMP_RATE_172K 0x0400
#define AUDIO_SAMP_RATE_192K 0x0800
/* Supported Bit Rates */
#define AUDIO_BIT_RATE_22K 0x01
#define AUDIO_BIT_RATE_44K 0x02
#define AUDIO_BIT_RATE_48K 0x04
#define AUDIO_BIT_RATE_96K 0x08
#define AUDIO_BIT_RATE_128K 0x10
#define AUDIO_BIT_RATE_160K 0x20
#define AUDIO_BIT_RATE_172K 0x40
#define AUDIO_BIT_RATE_192K 0x80
/* Support Dsp(input/output) Units controls */
#define AUDIO_DSP_PARAM 0 /* get/set all params */
#define AUDIO_DSP_SAMPLERATE 1 /* samplerate */
#define AUDIO_DSP_CHANNELS 2 /* channels */
#define AUDIO_DSP_SAMPLEBITS 3 /* sample bits width */
/* Supported Mixer Units controls */
#define AUDIO_MIXER_QUERY 0x0000
#define AUDIO_MIXER_MUTE 0x0001
#define AUDIO_MIXER_VOLUME 0x0002
#define AUDIO_MIXER_BASS 0x0004
#define AUDIO_MIXER_MID 0x0008
#define AUDIO_MIXER_TREBLE 0x0010
#define AUDIO_MIXER_EQUALIZER 0x0020
#define AUDIO_MIXER_LINE 0x0040
#define AUDIO_MIXER_DIGITAL 0x0080
#define AUDIO_MIXER_MIC 0x0100
#define AUDIO_MIXER_VITURAL 0x0200
#define AUDIO_MIXER_EXTEND 0x8000 /* extend mixer command */
#define AUDIO_VOLUME_MAX (100)
#define AUDIO_VOLUME_MIN (0)
#define CFG_AUDIO_REPLAY_QUEUE_COUNT 4
enum
{
AUDIO_STREAM_REPLAY = 0,
AUDIO_STREAM_RECORD,
AUDIO_STREAM_LAST = AUDIO_STREAM_RECORD,
};
/* the preferred number and size of audio pipeline buffer for the audio device */
struct rt_audio_buf_info
{
rt_uint8_t *buffer;
rt_uint16_t block_size;
rt_uint16_t block_count;
rt_uint32_t total_size;
};
struct rt_audio_device;
struct rt_audio_caps;
struct rt_audio_configure;
struct rt_audio_ops
{
rt_err_t (*getcaps)(struct rt_audio_device *audio, struct rt_audio_caps *caps);
rt_err_t (*configure)(struct rt_audio_device *audio, struct rt_audio_caps *caps);
rt_err_t (*init)(struct rt_audio_device *audio);
rt_err_t (*start)(struct rt_audio_device *audio, int stream);
rt_err_t (*stop)(struct rt_audio_device *audio, int stream);
rt_ssize_t (*transmit)(struct rt_audio_device *audio, const void *writeBuf, void *readBuf, rt_size_t size);
/* get page size of codec or private buffer's info */
void (*buffer_info)(struct rt_audio_device *audio, struct rt_audio_buf_info *info);
};
struct rt_audio_configure
{
rt_uint32_t samplerate;
rt_uint16_t channels;
rt_uint16_t samplebits;
};
struct rt_audio_caps
{
int main_type;
int sub_type;
union
{
rt_uint32_t mask;
int value;
struct rt_audio_configure config;
} udata;
};
struct rt_audio_replay
{
struct rt_mempool *mp;
struct rt_data_queue queue;
struct rt_mutex lock;
struct rt_completion cmp;
struct rt_audio_buf_info buf_info;
rt_uint8_t *write_data;
rt_uint16_t write_index;
rt_uint16_t read_index;
rt_uint32_t pos;
rt_uint8_t event;
rt_bool_t activated;
};
struct rt_audio_record
{
struct rt_audio_pipe pipe;
rt_bool_t activated;
};
struct rt_audio_device
{
struct rt_device parent;
struct rt_audio_ops *ops;
struct rt_audio_replay *replay;
struct rt_audio_record *record;
};
rt_err_t rt_audio_register(struct rt_audio_device *audio, const char *name, rt_uint32_t flag, void *data);
void rt_audio_tx_complete(struct rt_audio_device *audio);
void rt_audio_rx_done(struct rt_audio_device *audio, rt_uint8_t *pbuf, rt_size_t len);
/* Device Control Commands */
#define CODEC_CMD_RESET 0
#define CODEC_CMD_SET_VOLUME 1
#define CODEC_CMD_GET_VOLUME 2
#define CODEC_CMD_SAMPLERATE 3
#define CODEC_CMD_EQ 4
#define CODEC_CMD_3D 5
#define CODEC_VOLUME_MAX (63)
#endif /* __DEV_AUDIO_H__ */
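A playback sketch through the generic device interface: configure the replay stream, then write PCM frames; the device name "sound0", the sample format and the buffer are placeholders.
static void demo_play_pcm(const void *pcm, rt_size_t bytes)
{
    struct rt_audio_caps caps;
    rt_device_t snd = rt_device_find("sound0");

    if (snd == RT_NULL || rt_device_open(snd, RT_DEVICE_OFLAG_WRONLY) != RT_EOK)
    {
        return;
    }

    caps.main_type               = AUDIO_TYPE_OUTPUT;
    caps.sub_type                = AUDIO_DSP_PARAM;   /* set all stream params at once */
    caps.udata.config.samplerate = 44100;
    caps.udata.config.channels   = 2;
    caps.udata.config.samplebits = 16;
    rt_device_control(snd, AUDIO_CTL_CONFIGURE, &caps);

    rt_device_write(snd, 0, pcm, bytes); /* blocks while the replay queue is full */
    rt_device_close(snd);
}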

View File

@@ -0,0 +1,547 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-05-14 aubrcool@qq.com first version
* 2015-07-06 Bernard remove RT_CAN_USING_LED.
* 2022-05-08 hpmicro add CANFD support, fixed typos
*/
#ifndef __DEV_CAN_H_
#define __DEV_CAN_H_
#include <rtthread.h>
#ifndef RT_CANMSG_BOX_SZ
#define RT_CANMSG_BOX_SZ 16
#endif
#ifndef RT_CANSND_BOX_NUM
#define RT_CANSND_BOX_NUM 1
#endif
enum CAN_DLC
{
CAN_MSG_0BYTE = 0,
CAN_MSG_1BYTE,
CAN_MSG_2BYTES,
CAN_MSG_3BYTES,
CAN_MSG_4BYTES,
CAN_MSG_5BYTES,
CAN_MSG_6BYTES,
CAN_MSG_7BYTES,
CAN_MSG_8BYTES,
CAN_MSG_12BYTES,
CAN_MSG_16BYTES,
CAN_MSG_20BYTES,
CAN_MSG_24BYTES,
CAN_MSG_32BYTES,
CAN_MSG_48BYTES,
CAN_MSG_64BYTES,
};
enum CANBAUD
{
CAN1MBaud = 1000UL * 1000,/* 1 MBit/sec */
CAN800kBaud = 1000UL * 800, /* 800 kBit/sec */
CAN500kBaud = 1000UL * 500, /* 500 kBit/sec */
CAN250kBaud = 1000UL * 250, /* 250 kBit/sec */
CAN125kBaud = 1000UL * 125, /* 125 kBit/sec */
CAN100kBaud = 1000UL * 100, /* 100 kBit/sec */
CAN50kBaud = 1000UL * 50, /* 50 kBit/sec */
CAN20kBaud = 1000UL * 20, /* 20 kBit/sec */
CAN10kBaud = 1000UL * 10 /* 10 kBit/sec */
};
#define RT_CAN_MODE_NORMAL 0
#define RT_CAN_MODE_LISTEN 1
#define RT_CAN_MODE_LOOPBACK 2
#define RT_CAN_MODE_LOOPBACKANLISTEN 3
#define RT_CAN_MODE_PRIV 0x01
#define RT_CAN_MODE_NOPRIV 0x00
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup CAN_Device CAN Driver
*
* @brief CAN driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include "rtdevice.h"
*
 * #define CAN_DEV_NAME       "can1"      // CAN device name
 *
 * static struct rt_semaphore rx_sem;     // semaphore used to signal received messages
 * static rt_device_t can_dev;            // CAN device handle
*
 * // receive-data callback function
 * static rt_err_t can_rx_call(rt_device_t dev, rt_size_t size)
 * {
 *     // the CAN device raises an interrupt after data is received; this callback then releases the rx semaphore
* rt_sem_release(&rx_sem);
*
* return RT_EOK;
* }
*
* static void can_rx_thread(void *parameter)
* {
* int i;
* rt_err_t res;
* struct rt_can_msg rxmsg = {0};
*
 *     // set the receive callback function
* rt_device_set_rx_indicate(can_dev, can_rx_call);
*
* #ifdef RT_CAN_USING_HDR
* struct rt_can_filter_item items[5] =
* {
 *         RT_CAN_FILTER_ITEM_INIT(0x100, 0, 0, 0, 0x700, RT_NULL, RT_NULL), // std, match ID 0x100~0x1ff, hdr is -1, use the default filter table
 *         RT_CAN_FILTER_ITEM_INIT(0x300, 0, 0, 0, 0x700, RT_NULL, RT_NULL), // std, match ID 0x300~0x3ff, hdr is -1
 *         RT_CAN_FILTER_ITEM_INIT(0x211, 0, 0, 0, 0x7ff, RT_NULL, RT_NULL), // std, match ID 0x211, hdr is -1
 *         RT_CAN_FILTER_STD_INIT(0x486, RT_NULL, RT_NULL),                  // std, match ID 0x486, hdr is -1
 *         {0x555, 0, 0, 0, 0x7ff, 7,}                                       // std, match ID 0x555, hdr is 7, use filter table number 7
 *     };
 *     struct rt_can_filter_config cfg = {5, 1, items}; // 5 filter items in total
 *     // set the hardware filter table
* res = rt_device_control(can_dev, RT_CAN_CMD_SET_FILTER, &cfg);
* RT_ASSERT(res == RT_EOK);
* #endif
* res = RT_TRUE;
* res = rt_device_control(can_dev, RT_CAN_CMD_START, &res);
* while (1)
* {
 *         // an hdr of -1 means reading directly from the uselist list
 *         rxmsg.hdr = -1;
 *         // block until the receive semaphore is released
 *         rt_sem_take(&rx_sem, RT_WAITING_FOREVER);
 *         // read one frame from the CAN device
 *         rt_device_read(can_dev, 0, &rxmsg, sizeof(rxmsg));
 *         // print the message ID and data
* rt_kprintf("ID:%x", rxmsg.id);
* for (i = 0; i < 8; i++)
* {
* rt_kprintf("%2x", rxmsg.data[i]);
* }
*
* rt_kprintf("\n");
* }
* }
*
* int can_sample(int argc, char *argv[])
* {
* struct rt_can_msg msg = {0};
* rt_err_t res;
* rt_size_t size;
* rt_thread_t thread;
* char can_name[RT_NAME_MAX];
*
* if (argc == 2)
* {
* rt_strncpy(can_name, argv[1], RT_NAME_MAX);
* }
* else
* {
* rt_strncpy(can_name, CAN_DEV_NAME, RT_NAME_MAX);
* }
 *     // find the CAN device
* can_dev = rt_device_find(can_name);
* if (!can_dev)
* {
* rt_kprintf("find %s failed!\n", can_name);
* return -RT_ERROR;
* }
*
 *     // initialize the CAN receive semaphore
 *     rt_sem_init(&rx_sem, "rx_sem", 0, RT_IPC_FLAG_FIFO);
 *
 *     // open the CAN device in interrupt receive and transmit mode
 *     res = rt_device_open(can_dev, RT_DEVICE_FLAG_INT_TX | RT_DEVICE_FLAG_INT_RX);
 *     RT_ASSERT(res == RT_EOK);
 *     // create the data receive thread
* thread = rt_thread_create("can_rx", can_rx_thread, RT_NULL, 1024, 25, 10);
* if (thread != RT_NULL)
* {
* rt_thread_startup(thread);
* }
* else
* {
* rt_kprintf("create can_rx thread failed!\n");
* }
*
 *     msg.id = 0x78;              // ID is 0x78
 *     msg.ide = RT_CAN_STDID;     // standard frame format
 *     msg.rtr = RT_CAN_DTR;       // data frame
 *     msg.len = 8;                // data length is 8
 *     // the 8 bytes of data to send
* msg.data[0] = 0x00;
* msg.data[1] = 0x11;
* msg.data[2] = 0x22;
* msg.data[3] = 0x33;
* msg.data[4] = 0x44;
* msg.data[5] = 0x55;
* msg.data[6] = 0x66;
* msg.data[7] = 0x77;
 *     // send one CAN frame
* size = rt_device_write(can_dev, 0, &msg, sizeof(msg));
* if (size == 0)
* {
* rt_kprintf("can dev write data failed!\n");
* }
*
* return res;
* }
 * // export to the msh command list
* MSH_CMD_EXPORT(can_sample, can device sample);
* @endcode
*
* @ingroup Drivers
*
*/
/*!
* @addtogroup CAN_Device
* @{
*/
#define CAN_RX_FIFO0 (0x00000000U) /*!< CAN receive FIFO 0 */
#define CAN_RX_FIFO1 (0x00000001U) /*!< CAN receive FIFO 1 */
/**
* @brief CAN filter item
*/
struct rt_can_filter_item
{
rt_uint32_t id : 29;
rt_uint32_t ide : 1;
rt_uint32_t rtr : 1;
rt_uint32_t mode : 1;
rt_uint32_t mask;
    rt_int32_t hdr_bank;/* hardware filter bank index, e.g. maps to rx.FilterBank on some controllers */
    rt_uint32_t rxfifo;/* receive FIFO to use: CAN_RX_FIFO0 or CAN_RX_FIFO1 */
#ifdef RT_CAN_USING_HDR
rt_err_t (*ind)(rt_device_t dev, void *args , rt_int32_t hdr, rt_size_t size);
void *args;
#endif /*RT_CAN_USING_HDR*/
};
#ifdef RT_CAN_USING_HDR
#define RT_CAN_FILTER_ITEM_INIT(id,ide,rtr,mode,mask,ind,args) \
{(id), (ide), (rtr), (mode),(mask), -1, CAN_RX_FIFO0,(ind), (args)}/*0:CAN_RX_FIFO0*/
#define RT_CAN_FILTER_STD_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,0,0,0,0xFFFFFFFF,ind,args)
#define RT_CAN_FILTER_EXT_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,1,0,0,0xFFFFFFFF,ind,args)
#define RT_CAN_STD_RMT_FILTER_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,0,1,0,0xFFFFFFFF,ind,args)
#define RT_CAN_EXT_RMT_FILTER_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,1,1,0,0xFFFFFFFF,ind,args)
#define RT_CAN_STD_RMT_DATA_FILTER_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,0,0,1,0xFFFFFFFF,ind,args)
#define RT_CAN_EXT_RMT_DATA_FILTER_INIT(id,ind,args) \
RT_CAN_FILTER_ITEM_INIT(id,1,0,1,0xFFFFFFFF,ind,args)
#else
#define RT_CAN_FILTER_ITEM_INIT(id,ide,rtr,mode,mask) \
{(id), (ide), (rtr), (mode), (mask), -1, CAN_RX_FIFO0 }/*0:CAN_RX_FIFO0*/
#define RT_CAN_FILTER_STD_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,0,0,0,0xFFFFFFFF)
#define RT_CAN_FILTER_EXT_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,1,0,0,0xFFFFFFFF)
#define RT_CAN_STD_RMT_FILTER_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,0,1,0,0xFFFFFFFF)
#define RT_CAN_EXT_RMT_FILTER_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,1,1,0,0xFFFFFFFF)
#define RT_CAN_STD_RMT_DATA_FILTER_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,0,0,1,0xFFFFFFFF)
#define RT_CAN_EXT_RMT_DATA_FILTER_INIT(id) \
RT_CAN_FILTER_ITEM_INIT(id,1,0,1,0xFFFFFFFF)
#endif
/**
* @brief CAN filter configuration
*/
struct rt_can_filter_config
{
rt_uint32_t count;
rt_uint32_t actived;
struct rt_can_filter_item *items;
};
/**
* @brief CAN timing configuration
*/
struct rt_can_bit_timing
{
rt_uint16_t prescaler; /* Pre-scaler */
rt_uint16_t num_seg1; /* Bit Timing Segment 1, in terms of Tq */
rt_uint16_t num_seg2; /* Bit Timing Segment 2, in terms of Tq */
rt_uint8_t num_sjw; /* Synchronization Jump Width, in terms of Tq */
rt_uint8_t num_sspoff; /* Secondary Sample Point Offset, in terms of Tq */
};
/**
* @brief CAN bit timing configuration list
* @note
* items[0] always for CAN2.0/CANFD Arbitration Phase
* items[1] always for CANFD (if it exists)
*/
struct rt_can_bit_timing_config
{
rt_uint32_t count;
struct rt_can_bit_timing *items;
};
/**
* @brief CAN configuration
*/
struct can_configure
{
rt_uint32_t baud_rate;
rt_uint32_t msgboxsz;
rt_uint32_t sndboxnumber;
rt_uint32_t mode : 8;
rt_uint32_t privmode : 8;
rt_uint32_t reserved : 16;
rt_uint32_t ticks;
#ifdef RT_CAN_USING_HDR
rt_uint32_t maxhdr;
#endif
#ifdef RT_CAN_USING_CANFD
rt_uint32_t baud_rate_fd; /* CANFD data bit rate*/
rt_uint32_t use_bit_timing: 8; /* Use the bit timing for CAN timing configuration */
rt_uint32_t enable_canfd : 8; /* Enable CAN-FD mode */
rt_uint32_t reserved1 : 16;
/* The below fields take effect only if use_bit_timing is non-zero */
    struct rt_can_bit_timing can_timing;      /* CAN bit-timing / CANFD bit-timing for arbitration phase */
    struct rt_can_bit_timing canfd_timing;    /* CANFD bit-timing for data phase */
#endif
};
#define CANDEFAULTCONFIG \
{\
CAN1MBaud,\
RT_CANMSG_BOX_SZ,\
RT_CANSND_BOX_NUM,\
RT_CAN_MODE_NORMAL,\
};
struct rt_can_ops;
#define RT_CAN_CMD_SET_FILTER 0x13
#define RT_CAN_CMD_SET_BAUD 0x14
#define RT_CAN_CMD_SET_MODE 0x15
#define RT_CAN_CMD_SET_PRIV 0x16
#define RT_CAN_CMD_GET_STATUS 0x17
#define RT_CAN_CMD_SET_STATUS_IND 0x18
#define RT_CAN_CMD_SET_BUS_HOOK 0x19
#define RT_CAN_CMD_SET_CANFD 0x1A
#define RT_CAN_CMD_SET_BAUD_FD 0x1B
#define RT_CAN_CMD_SET_BITTIMING 0x1C
#define RT_CAN_CMD_START 0x1D
#define RT_DEVICE_CAN_INT_ERR 0x1000
enum RT_CAN_STATUS_MODE
{
NORMAL = 0,
ERRWARNING = 1,
ERRPASSIVE = 2,
BUSOFF = 4,
};
enum RT_CAN_BUS_ERR
{
RT_CAN_BUS_NO_ERR = 0,
RT_CAN_BUS_BIT_PAD_ERR = 1,
RT_CAN_BUS_FORMAT_ERR = 2,
RT_CAN_BUS_ACK_ERR = 3,
RT_CAN_BUS_IMPLICIT_BIT_ERR = 4,
RT_CAN_BUS_EXPLICIT_BIT_ERR = 5,
RT_CAN_BUS_CRC_ERR = 6,
};
/**
* @brief CAN status
*/
struct rt_can_status
{
rt_uint32_t rcverrcnt;
rt_uint32_t snderrcnt;
rt_uint32_t errcode;
rt_uint32_t rcvpkg;
rt_uint32_t dropedrcvpkg;
rt_uint32_t sndpkg;
rt_uint32_t dropedsndpkg;
rt_uint32_t bitpaderrcnt;
rt_uint32_t formaterrcnt;
rt_uint32_t ackerrcnt;
rt_uint32_t biterrcnt;
rt_uint32_t crcerrcnt;
rt_uint32_t rcvchange;
rt_uint32_t sndchange;
rt_uint32_t lasterrtype;
};
#ifdef RT_CAN_USING_HDR
struct rt_can_hdr
{
rt_uint32_t connected;
rt_uint32_t msgs;
struct rt_can_filter_item filter;
struct rt_list_node list;
};
#endif
struct rt_can_device;
typedef rt_err_t (*rt_canstatus_ind)(struct rt_can_device *, void *);
typedef struct rt_can_status_ind_type
{
rt_canstatus_ind ind;
void *args;
} *rt_can_status_ind_type_t;
typedef void (*rt_can_bus_hook)(struct rt_can_device *);
struct rt_can_device
{
struct rt_device parent;
const struct rt_can_ops *ops;
struct can_configure config;
struct rt_can_status status;
rt_uint32_t timerinitflag;
struct rt_timer timer;
struct rt_can_status_ind_type status_indicate;
#ifdef RT_CAN_USING_HDR
struct rt_can_hdr *hdr;
#endif
#ifdef RT_CAN_USING_BUS_HOOK
rt_can_bus_hook bus_hook;
#endif /*RT_CAN_USING_BUS_HOOK*/
struct rt_mutex lock;
void *can_rx;
void *can_tx;
};
typedef struct rt_can_device *rt_can_t;
#define RT_CAN_STDID 0
#define RT_CAN_EXTID 1
#define RT_CAN_DTR 0
#define RT_CAN_RTR 1
typedef struct rt_can_status *rt_can_status_t;
struct rt_can_msg
{
rt_uint32_t id : 29;
rt_uint32_t ide : 1;
rt_uint32_t rtr : 1;
rt_uint32_t rsv : 1;
rt_uint32_t len : 8;
rt_uint32_t priv : 8;
    rt_int32_t hdr_index : 8;/* hardware filter match index, e.g. maps to rx.FilterMatchIndex on some controllers */
#ifdef RT_CAN_USING_CANFD
rt_uint32_t fd_frame : 1;
rt_uint32_t brs : 1;
    rt_uint32_t rxfifo : 2;/* receive FIFO the frame came from: CAN_RX_FIFO0 or CAN_RX_FIFO1 */
rt_uint32_t reserved : 4;
#else
    rt_uint32_t rxfifo : 2;/* receive FIFO the frame came from: CAN_RX_FIFO0 or CAN_RX_FIFO1 */
rt_uint32_t reserved : 6;
#endif
#ifdef RT_CAN_USING_CANFD
rt_uint8_t data[64];
#else
rt_uint8_t data[8];
#endif
};
typedef struct rt_can_msg *rt_can_msg_t;
struct rt_can_msg_list
{
struct rt_list_node list;
#ifdef RT_CAN_USING_HDR
struct rt_list_node hdrlist;
struct rt_can_hdr *owner;
#endif
struct rt_can_msg data;
};
struct rt_can_rx_fifo
{
/* software fifo */
struct rt_can_msg_list *buffer;
rt_uint32_t freenumbers;
struct rt_list_node freelist;
struct rt_list_node uselist;
};
#define RT_CAN_SND_RESULT_OK 0
#define RT_CAN_SND_RESULT_ERR 1
#define RT_CAN_SND_RESULT_WAIT 2
#define RT_CAN_EVENT_RX_IND 0x01 /* Rx indication */
#define RT_CAN_EVENT_TX_DONE 0x02 /* Tx complete */
#define RT_CAN_EVENT_TX_FAIL 0x03 /* Tx fail */
#define RT_CAN_EVENT_RX_TIMEOUT 0x05 /* Rx timeout */
#define RT_CAN_EVENT_RXOF_IND 0x06 /* Rx overflow */
struct rt_can_sndbxinx_list
{
struct rt_list_node list;
struct rt_completion completion;
rt_uint32_t result;
};
struct rt_can_tx_fifo
{
struct rt_can_sndbxinx_list *buffer;
struct rt_semaphore sem;
struct rt_list_node freelist;
};
/**
* @brief CAN operators
*/
struct rt_can_ops
{
rt_err_t (*configure)(struct rt_can_device *can, struct can_configure *cfg);
rt_err_t (*control)(struct rt_can_device *can, int cmd, void *arg);
rt_ssize_t (*sendmsg)(struct rt_can_device *can, const void *buf, rt_uint32_t boxno);
rt_ssize_t (*recvmsg)(struct rt_can_device *can, void *buf, rt_uint32_t boxno);
};
/**
* @brief Register a CAN device to device list
*
* @param can the CAN device object
* @param name the name of CAN device
* @param ops the CAN device operators
* @param data the private data of CAN device
*
* @return the error code, RT_EOK on successfully
*/
rt_err_t rt_hw_can_register(struct rt_can_device *can,
const char *name,
const struct rt_can_ops *ops,
void *data);
/**
* @brief CAN interrupt service routine
*
* @param can the CAN device
* @param event the event mask
*/
void rt_hw_can_isr(struct rt_can_device *can, int event);
/*! @}*/
#endif /* __DEV_CAN_H_ */
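
To complement the receive example above, the following is a minimal sketch of pushing an explicit CANFD bit-timing table to a controller through RT_CAN_CMD_SET_BITTIMING. The device name "canfd0" and all prescaler/segment values are placeholders; real values must be derived from the controller's CAN clock.

#include <rtthread.h>
#include <rtdevice.h>

static int canfd_timing_sample(void)
{
    rt_device_t can_dev = rt_device_find("canfd0");
    /* items[0]: arbitration phase, items[1]: CANFD data phase (see the note above) */
    struct rt_can_bit_timing items[2] =
    {
        { .prescaler = 4, .num_seg1 = 13, .num_seg2 = 2, .num_sjw = 1 },
        { .prescaler = 1, .num_seg1 = 13, .num_seg2 = 2, .num_sjw = 1 },
    };
    struct rt_can_bit_timing_config cfg = { .count = 2, .items = items };

    if (can_dev == RT_NULL)
        return -RT_ERROR;

    if (rt_device_open(can_dev, RT_DEVICE_FLAG_INT_TX | RT_DEVICE_FLAG_INT_RX) != RT_EOK)
        return -RT_ERROR;

    /* the driver maps these Tq values onto its bit-timing registers */
    return rt_device_control(can_dev, RT_CAN_CMD_SET_BITTIMING, &cfg);
}
MSH_CMD_EXPORT(canfd_timing_sample, set CANFD bit timing sample);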

View File

@@ -0,0 +1,401 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-04-25 weety first version
* 2021-04-20 RiceChen added support for bus control api
*/
#ifndef __DEV_I2C_H__
#define __DEV_I2C_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup I2C I2C
*
* @brief I2C driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include <rtdevice.h>
*
 * #define AHT10_I2C_BUS_NAME          "i2c1"  // name of the I2C bus the sensor is attached to
 * #define AHT10_ADDR                  0x38    // slave address
 * #define AHT10_CALIBRATION_CMD       0xE1    // calibration command
 * #define AHT10_NORMAL_CMD            0xA8    // normal command
 * #define AHT10_GET_DATA              0xAC    // get-data command
 *
 * static struct rt_i2c_bus_device *i2c_bus = RT_NULL;     // I2C bus device handle
 * static rt_bool_t initialized = RT_FALSE;                // sensor initialized state
 *
 * // write a sensor register
* static rt_err_t write_reg(struct rt_i2c_bus_device *bus, rt_uint8_t reg, rt_uint8_t *data)
* {
* rt_uint8_t buf[3];
* struct rt_i2c_msg msgs;
* rt_uint32_t buf_size = 1;
*
* buf[0] = reg; //cmd
* if (data != RT_NULL)
* {
* buf[1] = data[0];
* buf[2] = data[1];
* buf_size = 3;
* }
*
* msgs.addr = AHT10_ADDR;
* msgs.flags = RT_I2C_WR;
* msgs.buf = buf;
* msgs.len = buf_size;
*
 *     // transfer the data through the I2C device interface
* if (rt_i2c_transfer(bus, &msgs, 1) == 1)
* {
* return RT_EOK;
* }
* else
* {
* return -RT_ERROR;
* }
* }
*
 * // read sensor register data
* static rt_err_t read_regs(struct rt_i2c_bus_device *bus, rt_uint8_t len, rt_uint8_t *buf)
* {
* struct rt_i2c_msg msgs;
*
* msgs.addr = AHT10_ADDR;
* msgs.flags = RT_I2C_RD;
* msgs.buf = buf;
* msgs.len = len;
*
 *     // transfer the data through the I2C device interface
* if (rt_i2c_transfer(bus, &msgs, 1) == 1)
* {
* return RT_EOK;
* }
* else
* {
* return -RT_ERROR;
* }
* }
*
* static void read_temp_humi(float *cur_temp, float *cur_humi)
* {
* rt_uint8_t temp[6];
*
 *     write_reg(i2c_bus, AHT10_GET_DATA, RT_NULL);      // send the command
 *     rt_thread_mdelay(400);
 *     read_regs(i2c_bus, 6, temp);                      // read the sensor data
 *
 *     // humidity conversion
 *     *cur_humi = (temp[1] << 12 | temp[2] << 4 | (temp[3] & 0xf0) >> 4) * 100.0 / (1 << 20);
 *     // temperature conversion
 *     *cur_temp = ((temp[3] & 0xf) << 16 | temp[4] << 8 | temp[5]) * 200.0 / (1 << 20) - 50;
* }
*
* static void aht10_init(const char *name)
* {
* rt_uint8_t temp[2] = {0, 0};
*
 *     // find the I2C bus device and get its handle
* i2c_bus = (struct rt_i2c_bus_device *)rt_device_find(name);
*
* if (i2c_bus == RT_NULL)
* {
* rt_kprintf("can't find %s device!\n", name);
* }
* else
* {
* write_reg(i2c_bus, AHT10_NORMAL_CMD, temp);
* rt_thread_mdelay(400);
*
* temp[0] = 0x08;
* temp[1] = 0x00;
* write_reg(i2c_bus, AHT10_CALIBRATION_CMD, temp);
* rt_thread_mdelay(400);
* initialized = RT_TRUE;
* }
* }
*
* static void i2c_aht10_sample(int argc, char *argv[])
* {
* float humidity, temperature;
* char name[RT_NAME_MAX];
*
* humidity = 0.0;
* temperature = 0.0;
*
* if (argc == 2)
* {
* rt_strncpy(name, argv[1], RT_NAME_MAX);
* }
* else
* {
* rt_strncpy(name, AHT10_I2C_BUS_NAME, RT_NAME_MAX);
* }
*
* if (!initialized)
* {
 *         // initialize the sensor
* aht10_init(name);
* }
* if (initialized)
* {
 *         // read the temperature and humidity data
* read_temp_humi(&temperature, &humidity);
*
* rt_kprintf("read aht10 sensor humidity : %d.%d %%\n", (int)humidity, (int)(humidity * 10) % 10);
* if( temperature >= 0 )
* {
* rt_kprintf("read aht10 sensor temperature: %d.%d°C\n", (int)temperature, (int)(temperature * 10) % 10);
* }
* else
* {
* rt_kprintf("read aht10 sensor temperature: %d.%d°C\n", (int)temperature, (int)(-temperature * 10) % 10);
* }
* }
* else
* {
* rt_kprintf("initialize sensor failed!\n");
* }
* }
 * // export to the msh command list
* MSH_CMD_EXPORT(i2c_aht10_sample, i2c aht10 sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup I2C
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#define RT_I2C_WR 0x0000 /*!< i2c write flag */
#define RT_I2C_RD (1u << 0) /*!< i2c read flag */
#define RT_I2C_ADDR_10BIT (1u << 2) /*!< this is a ten bit chip address */
#define RT_I2C_NO_START (1u << 4) /*!< do not generate START condition */
#define RT_I2C_IGNORE_NACK (1u << 5) /*!< ignore NACK from slave */
#define RT_I2C_NO_READ_ACK (1u << 6) /*!< when reading, do not send an ACK */
#define RT_I2C_NO_STOP (1u << 7) /*!< do not generate STOP condition */
#define RT_I2C_DEV_CTRL_10BIT (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x01)
#define RT_I2C_DEV_CTRL_ADDR (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x02)
#define RT_I2C_DEV_CTRL_TIMEOUT (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x03)
#define RT_I2C_DEV_CTRL_RW (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x04)
#define RT_I2C_DEV_CTRL_CLK (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x05)
#define RT_I2C_DEV_CTRL_UNLOCK (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x06)
#define RT_I2C_DEV_CTRL_GET_STATE (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x07)
#define RT_I2C_DEV_CTRL_GET_MODE (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x08)
#define RT_I2C_DEV_CTRL_GET_ERROR (RT_DEVICE_CTRL_BASE(I2CBUS) + 0x09)
/**
* @brief I2C Private Data
*/
struct rt_i2c_priv_data
{
struct rt_i2c_msg *msgs;
rt_size_t number;
};
/**
* @brief I2C Message
*/
struct rt_i2c_msg
{
rt_uint16_t addr;
rt_uint16_t flags;
rt_uint16_t len;
rt_uint8_t *buf;
};
struct rt_i2c_bus_device;
/**
* @brief I2C Bus Device Operations
*/
struct rt_i2c_bus_device_ops
{
rt_ssize_t (*master_xfer)(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg msgs[],
rt_uint32_t num);
rt_ssize_t (*slave_xfer)(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg msgs[],
rt_uint32_t num);
rt_err_t (*i2c_bus_control)(struct rt_i2c_bus_device *bus,
int cmd,
void *args);
};
/**
* @brief I2C Bus Device
*/
struct rt_i2c_bus_device
{
struct rt_device parent;
const struct rt_i2c_bus_device_ops *ops;
rt_uint16_t flags;
struct rt_mutex lock;
rt_uint32_t timeout;
rt_uint32_t retries;
void *priv;
};
/**
* @brief I2C Client
*/
struct rt_i2c_client
{
#ifdef RT_USING_DM
struct rt_device parent;
const char *name;
const struct rt_i2c_device_id *id;
const struct rt_ofw_node_id *ofw_id;
#endif
struct rt_i2c_bus_device *bus;
rt_uint16_t client_addr;
};
#ifdef RT_USING_DM
struct rt_i2c_device_id
{
char name[20];
void *data;
};
struct rt_i2c_driver
{
struct rt_driver parent;
const struct rt_i2c_device_id *ids;
const struct rt_ofw_node_id *ofw_ids;
rt_err_t (*probe)(struct rt_i2c_client *client);
rt_err_t (*remove)(struct rt_i2c_client *client);
rt_err_t (*shutdown)(struct rt_i2c_client *client);
};
rt_err_t rt_i2c_driver_register(struct rt_i2c_driver *driver);
rt_err_t rt_i2c_device_register(struct rt_i2c_client *client);
#define RT_I2C_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, i2c, BUILIN)
#endif /* RT_USING_DM */
/**
* @brief I2C Bus Device Initialization
*
* @param bus the I2C bus device
* @param name the name of I2C bus device
*
* @return rt_err_t error code
*/
rt_err_t rt_i2c_bus_device_device_init(struct rt_i2c_bus_device *bus,
const char *name);
/**
* @brief I2C Bus Device Register
*
* @param bus the I2C bus device
* @param bus_name the name of I2C bus device
*
* @return rt_err_t error code
*/
rt_err_t rt_i2c_bus_device_register(struct rt_i2c_bus_device *bus,
const char *bus_name);
/**
* @brief I2C Bus Device Find
*
* @param bus_name the name of I2C bus device
*
* @return rt_i2c_bus_device the I2C bus device
*/
struct rt_i2c_bus_device *rt_i2c_bus_device_find(const char *bus_name);
/**
* @brief I2C data transmission.
*
* @param bus the I2C bus device
* @param msgs the I2C message list
* @param num the number of I2C message
*
* @return rt_ssize_t the actual length of transmitted
*/
rt_ssize_t rt_i2c_transfer(struct rt_i2c_bus_device *bus,
struct rt_i2c_msg msgs[],
rt_uint32_t num);
/**
* @brief I2C Control
*
* @param bus the I2C bus device
* @param cmd the I2C control command
* @param args the I2C control arguments
*
* @return rt_err_t error code
*/
rt_err_t rt_i2c_control(struct rt_i2c_bus_device *bus,
int cmd,
void *args);
/**
* @brief I2C Master Send
*
* @param bus the I2C bus device
* @param addr the I2C slave address
* @param flags the I2C flags
* @param buf the I2C send buffer
* @param count the I2C send buffer length
*
* @return rt_ssize_t the actual length of transmitted
*/
rt_ssize_t rt_i2c_master_send(struct rt_i2c_bus_device *bus,
rt_uint16_t addr,
rt_uint16_t flags,
const rt_uint8_t *buf,
rt_uint32_t count);
/**
* @brief I2C Master Receive
*
* @param bus the I2C bus device
* @param addr the I2C slave address
* @param flags the I2C flags
* @param buf the I2C receive buffer
* @param count the I2C receive buffer length
*
* @return rt_ssize_t the actual length of received
*/
rt_ssize_t rt_i2c_master_recv(struct rt_i2c_bus_device *bus,
rt_uint16_t addr,
rt_uint16_t flags,
rt_uint8_t *buf,
rt_uint32_t count);
rt_inline rt_err_t rt_i2c_bus_lock(struct rt_i2c_bus_device *bus, rt_tick_t timeout)
{
return rt_mutex_take(&bus->lock, timeout);
}
rt_inline rt_err_t rt_i2c_bus_unlock(struct rt_i2c_bus_device *bus)
{
return rt_mutex_release(&bus->lock);
}
#ifdef __cplusplus
}
#endif
/*! @}*/
#endif
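
As a smaller companion to the AHT10 example above, this sketch shows the rt_i2c_master_send()/rt_i2c_master_recv() convenience calls for a register-style read. The bus name "i2c1", the slave address 0x50 and the register layout are hypothetical.

#include <rtthread.h>
#include <rtdevice.h>

#define DEMO_DEV_ADDR 0x50 /* hypothetical 7-bit slave address */

/* read one register: write the register index, then read the value back */
static rt_err_t demo_read_reg(struct rt_i2c_bus_device *bus,
                              rt_uint8_t reg, rt_uint8_t *val)
{
    if (rt_i2c_master_send(bus, DEMO_DEV_ADDR, RT_I2C_WR, &reg, 1) != 1)
        return -RT_ERROR;

    if (rt_i2c_master_recv(bus, DEMO_DEV_ADDR, RT_I2C_RD, val, 1) != 1)
        return -RT_ERROR;

    return RT_EOK;
}

static void i2c_send_recv_sample(void)
{
    rt_uint8_t value;
    struct rt_i2c_bus_device *bus = rt_i2c_bus_device_find("i2c1");

    if (bus != RT_NULL && demo_read_reg(bus, 0x00, &value) == RT_EOK)
    {
        rt_kprintf("reg[0x00] = 0x%02x\n", value);
    }
}
MSH_CMD_EXPORT(i2c_send_recv_sample, i2c master send recv sample);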

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-04-25 weety first version
*/
#ifndef __DEV_I2C_BIT_OPS_H__
#define __DEV_I2C_BIT_OPS_H__
#ifdef __cplusplus
extern "C" {
#endif
struct rt_i2c_bit_ops
{
void *data; /* private data for lowlevel routines */
void (*set_sda)(void *data, rt_int32_t state);
void (*set_scl)(void *data, rt_int32_t state);
rt_int32_t (*get_sda)(void *data);
rt_int32_t (*get_scl)(void *data);
void (*udelay)(rt_uint32_t us);
rt_uint32_t delay_us; /* scl and sda line delay */
rt_uint32_t timeout; /* in tick */
void (*pin_init)(void);
rt_bool_t i2c_pin_init_flag;
};
rt_err_t rt_i2c_bit_add_bus(struct rt_i2c_bus_device *bus,
const char *bus_name);
#ifdef __cplusplus
}
#endif
#endif
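
A rough sketch of wiring rt_i2c_bit_ops to two GPIOs and registering the resulting software bus. The pin numbers, the bus name "i2c2" and the use of rt_hw_us_delay() for the delay hook are assumptions for illustration; any microsecond delay source and open-drain capable pins will do.

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define SOFT_I2C_SDA_PIN 20 /* hypothetical pin numbers */
#define SOFT_I2C_SCL_PIN 21

static void gpio_set_sda(void *data, rt_int32_t state)
{
    rt_pin_write(SOFT_I2C_SDA_PIN, state ? PIN_HIGH : PIN_LOW);
}

static void gpio_set_scl(void *data, rt_int32_t state)
{
    rt_pin_write(SOFT_I2C_SCL_PIN, state ? PIN_HIGH : PIN_LOW);
}

static rt_int32_t gpio_get_sda(void *data)
{
    return rt_pin_read(SOFT_I2C_SDA_PIN);
}

static rt_int32_t gpio_get_scl(void *data)
{
    return rt_pin_read(SOFT_I2C_SCL_PIN);
}

static void gpio_udelay(rt_uint32_t us)
{
    rt_hw_us_delay(us); /* assumed microsecond delay helper */
}

static struct rt_i2c_bit_ops soft_i2c_ops =
{
    .set_sda  = gpio_set_sda,
    .set_scl  = gpio_set_scl,
    .get_sda  = gpio_get_sda,
    .get_scl  = gpio_get_scl,
    .udelay   = gpio_udelay,
    .delay_us = 5,    /* roughly 100 kHz */
    .timeout  = 100,  /* ticks */
};

static struct rt_i2c_bus_device soft_i2c_bus;

static int soft_i2c_init(void)
{
    /* open-drain lines, pulled up externally or by the pin controller */
    rt_pin_mode(SOFT_I2C_SDA_PIN, PIN_MODE_OUTPUT_OD);
    rt_pin_mode(SOFT_I2C_SCL_PIN, PIN_MODE_OUTPUT_OD);

    soft_i2c_bus.priv = &soft_i2c_ops; /* the bit-ops core reads its ops from priv */
    return rt_i2c_bit_add_bus(&soft_i2c_bus, "i2c2");
}
INIT_DEVICE_EXPORT(soft_i2c_init);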

View File

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-26 GuEe-GUI first version
*/
#ifndef __DEV_I2C_DM_H__
#define __DEV_I2C_DM_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/core/bus.h>
/* I2C Frequency Modes */
#define I2C_MAX_STANDARD_MODE_FREQ 100000
#define I2C_MAX_FAST_MODE_FREQ 400000
#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000
#define I2C_MAX_TURBO_MODE_FREQ 1400000
#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000
#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000
struct i2c_timings
{
rt_uint32_t bus_freq_hz; /* the bus frequency in Hz */
rt_uint32_t scl_rise_ns; /* time SCL signal takes to rise in ns; t(r) in the I2C specification */
rt_uint32_t scl_fall_ns; /* time SCL signal takes to fall in ns; t(f) in the I2C specification */
rt_uint32_t scl_int_delay_ns; /* time IP core additionally needs to setup SCL in ns */
rt_uint32_t sda_fall_ns; /* time SDA signal takes to fall in ns; t(f) in the I2C specification */
rt_uint32_t sda_hold_ns; /* time IP core additionally needs to hold SDA in ns */
rt_uint32_t digital_filter_width_ns; /* width in ns of spikes on i2c lines that the IP core digital filter can filter out */
rt_uint32_t analog_filter_cutoff_freq_hz; /* threshold frequency for the low pass IP core analog filter */
};
#ifdef RT_USING_OFW
rt_err_t i2c_timings_ofw_parse(struct rt_ofw_node *dev_np, struct i2c_timings *timings,
rt_bool_t use_defaults);
#else
rt_inline rt_err_t i2c_timings_ofw_parse(struct rt_ofw_node *dev_np, struct i2c_timings *timings,
rt_bool_t use_defaults)
{
return RT_EOK;
}
#endif /* RT_USING_OFW */
void i2c_bus_scan_clients(struct rt_i2c_bus_device *bus);
#endif /* __DEV_I2C_DM_H__ */
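
A sketch of how a bus-controller probe might consume these timings. It assumes the devicetree node `np` is handed over by the platform/OFW bus; the divider programming in the branches is controller-specific and left as comments.

#include <rtthread.h>
#include <rtdevice.h>

static rt_err_t demo_i2c_apply_timings(struct rt_ofw_node *np)
{
    struct i2c_timings t;
    rt_err_t err;

    /* parse the node's timing properties, falling back to the I2C spec
     * defaults because use_defaults is RT_TRUE */
    err = i2c_timings_ofw_parse(np, &t, RT_TRUE);
    if (err != RT_EOK)
        return err;

    if (t.bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
    {
        /* program fast-mode-plus (1 MHz) dividers here */
    }
    else if (t.bus_freq_hz > I2C_MAX_STANDARD_MODE_FREQ)
    {
        /* program fast-mode dividers, trimming with t.scl_rise_ns / t.scl_fall_ns */
    }

    return RT_EOK;
}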

View File

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-06-15 hichard first version
* 2024-05-25 HPMicro add strobe support
*/
#ifndef __DEV_MMC_H__
#define __DEV_MMC_H__
#include <rtthread.h>
#include <drivers/mmcsd_host.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* EXT_CSD fields
*/
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */
#define EXT_CSD_BKOPS_EN 163 /* R/W */
#define EXT_CSD_BKOPS_START 164 /* W */
#define EXT_CSD_SANITIZE_START 165 /* W */
#define EXT_CSD_WR_REL_PARAM 166 /* RO */
#define EXT_CSD_RPMB_MULT 168 /* RO */
#define EXT_CSD_BOOT_WP 173 /* R/W */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
#define EXT_CSD_PART_CONFIG 179 /* R/W */
#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
#define EXT_CSD_BUS_WIDTH 183 /* R/W */
#define EXT_CSD_STROBE_SUPPORT 184 /* RO */
#define EXT_CSD_HS_TIMING 185 /* R/W */
#define EXT_CSD_POWER_CLASS 187 /* R/W */
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
#define EXT_CSD_PWR_CL_26_195 201 /* RO */
#define EXT_CSD_PWR_CL_52_360 202 /* RO */
#define EXT_CSD_PWR_CL_26_360 203 /* RO */
#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */
#define EXT_CSD_S_A_TIMEOUT 217 /* RO */
#define EXT_CSD_REL_WR_SEC_C 222 /* RO */
#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */
#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
#define EXT_CSD_BOOT_MULT 226 /* RO */
#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */
#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */
#define EXT_CSD_TRIM_MULT 232 /* RO */
#define EXT_CSD_PWR_CL_200_195 236 /* RO */
#define EXT_CSD_PWR_CL_200_360 237 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
#define EXT_CSD_BKOPS_STATUS 246 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
/*
* EXT_CSD field definitions
*/
#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
#define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04)
#define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01)
#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7)
#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1)
#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
#define EXT_CSD_CMD_SET_NORMAL (1<<0)
#define EXT_CSD_CMD_SET_SECURE (1<<1)
#define EXT_CSD_CMD_SET_CPSECURE (1<<2)
#define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */
#define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */
#define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \
EXT_CSD_CARD_TYPE_HS_52)
#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */
/* DDR mode @1.8V or 3V I/O */
#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */
/* DDR mode @1.2V I/O */
#define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \
| EXT_CSD_CARD_TYPE_DDR_1_2V)
#define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */
#define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */
/* SDR mode @1.2V I/O */
#define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \
EXT_CSD_CARD_TYPE_HS200_1_2V)
#define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */
#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */
#define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \
EXT_CSD_CARD_TYPE_HS400_1_2V)
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */
#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */
#define EXT_CSD_DDR_BUS_WIDTH_8_EH_DS 0x86 /* Card is in 8 bit DDR mode with Enhanced Data Strobe */
#define EXT_CSD_TIMING_BC 0 /* Backwards compatibility */
#define EXT_CSD_TIMING_HS 1 /* High speed */
#define EXT_CSD_TIMING_HS200 2 /* HS200 */
#define EXT_CSD_TIMING_HS400 3 /* HS400 */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
#define EXT_CSD_SEC_GB_CL_EN BIT(4)
#define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */
#define EXT_CSD_RST_N_EN_MASK 0x3
#define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */
#define EXT_CSD_NO_POWER_NOTIFICATION 0
#define EXT_CSD_POWER_ON 1
#define EXT_CSD_POWER_OFF_SHORT 2
#define EXT_CSD_POWER_OFF_LONG 3
#define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */
#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 4 bit PWR CLS */
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
#define EXT_CSD_PACKED_EVENT_EN BIT(3)
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
#define EXT_CSD_PACKED_FAILURE BIT(3)
#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/*
* BKOPS status level
*/
#define EXT_CSD_BKOPS_LEVEL_2 0x2
/*
* MMC_SWITCH access modes
*/
#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */
#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
/*
* extern function
*/
rt_err_t mmc_send_op_cond(struct rt_mmcsd_host *host, rt_uint32_t ocr, rt_uint32_t *rocr);
rt_int32_t init_mmc(struct rt_mmcsd_host *host, rt_uint32_t ocr);
#ifdef __cplusplus
}
#endif
#endif
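
The EXT_CSD indices and MMC_SWITCH access modes above are combined into the 32-bit argument of the eMMC SWITCH command (CMD6). A small illustrative helper showing that encoding (bit layout per the eMMC specification; the helper name and include path are assumptions):

#include <rtthread.h>
#include <drivers/dev_mmc.h> /* assumed include path for the EXT_CSD / MMC_SWITCH defines */

/* pack the CMD6 argument:
 *   [25:24] access mode, [23:16] EXT_CSD byte index, [15:8] value, [2:0] command set */
rt_inline rt_uint32_t mmc_switch_arg(rt_uint8_t mode, rt_uint8_t index,
                                     rt_uint8_t value, rt_uint8_t cmd_set)
{
    return ((rt_uint32_t)mode << 24) |
           ((rt_uint32_t)index << 16) |
           ((rt_uint32_t)value << 8)  |
           (rt_uint32_t)cmd_set;
}

/* e.g. request 8-bit DDR bus width:
 *   arg = mmc_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE, EXT_CSD_BUS_WIDTH,
 *                        EXT_CSD_DDR_BUS_WIDTH_8, EXT_CSD_CMD_SET_NORMAL); */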

View File

@@ -0,0 +1,260 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
*/
#ifndef __DEV_MMCSD_CORE_H__
#define __DEV_MMCSD_CORE_H__
#include <rtthread.h>
#include <drivers/mmcsd_host.h>
#include <drivers/mmcsd_card.h>
#include <drivers/mmcsd_cmd.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef RT_MMCSD_DBG
#define mmcsd_dbg(fmt, ...) rt_kprintf(fmt, ##__VA_ARGS__)
#else
#define mmcsd_dbg(fmt, ...)
#endif
struct rt_mmcsd_data
{
rt_uint32_t blksize;
rt_uint32_t blks;
rt_uint32_t *buf;
rt_int32_t err;
rt_uint32_t flags;
#define DATA_DIR_WRITE (1 << 0)
#define DATA_DIR_READ (1 << 1)
#define DATA_STREAM (1 << 2)
unsigned int bytes_xfered;
struct rt_mmcsd_cmd *stop; /* stop command */
struct rt_mmcsd_req *mrq; /* associated request */
rt_uint32_t timeout_ns;
rt_uint32_t timeout_clks;
void *sg; /* scatter list */
rt_uint16_t sg_len; /* size of scatter list */
rt_int16_t sg_count; /* mapped sg entries */
rt_ubase_t host_cookie; /* host driver private data */
};
struct rt_mmcsd_cmd
{
rt_uint32_t cmd_code;
rt_uint32_t arg;
rt_uint32_t resp[4];
rt_uint32_t flags;
/* response types
*bits:0~3
*/
#define RESP_MASK (0xF)
#define RESP_NONE (0)
#define RESP_R1 (1 << 0)
#define RESP_R1B (2 << 0)
#define RESP_R2 (3 << 0)
#define RESP_R3 (4 << 0)
#define RESP_R4 (5 << 0)
#define RESP_R6 (6 << 0)
#define RESP_R7 (7 << 0)
#define RESP_R5 (8 << 0) /*SDIO command response type*/
/*command types
*bits:4~5
*/
#define CMD_MASK (3 << 4) /* command type */
#define CMD_AC (0 << 4)
#define CMD_ADTC (1 << 4)
#define CMD_BC (2 << 4)
#define CMD_BCR (3 << 4)
#define resp_type(cmd) ((cmd)->flags & RESP_MASK)
/* SPI response types
*bits:6~8
*/
#define RESP_SPI_MASK (0x7 << 6)
#define RESP_SPI_R1 (1 << 6)
#define RESP_SPI_R1B (2 << 6)
#define RESP_SPI_R2 (3 << 6)
#define RESP_SPI_R3 (4 << 6)
#define RESP_SPI_R4 (5 << 6)
#define RESP_SPI_R5 (6 << 6)
#define RESP_SPI_R7 (7 << 6)
#define spi_resp_type(cmd) ((cmd)->flags & RESP_SPI_MASK)
/*
* These are the command types.
*/
#define cmd_type(cmd) ((cmd)->flags & CMD_MASK)
rt_int32_t retries; /* max number of retries */
rt_int32_t err;
unsigned int busy_timeout; /* busy detect timeout in ms */
struct rt_mmcsd_data *data;
struct rt_mmcsd_req *mrq; /* associated request */
};
struct rt_mmcsd_req
{
struct rt_mmcsd_data *data;
struct rt_mmcsd_cmd *cmd;
struct rt_mmcsd_cmd *stop;
struct rt_mmcsd_cmd *sbc; /* SET_BLOCK_COUNT for multiblock */
/* Allow other commands during this ongoing data transfer or busy wait */
int cap_cmd_during_tfr;
};
/*the following is response bit*/
#define R1_OUT_OF_RANGE (1 << 31) /* er, c */
#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */
#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */
#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */
#define R1_ERASE_PARAM (1 << 27) /* ex, c */
#define R1_WP_VIOLATION (1 << 26) /* erx, c */
#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */
#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */
#define R1_COM_CRC_ERROR (1 << 23) /* er, b */
#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */
#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */
#define R1_CC_ERROR (1 << 20) /* erx, c */
#define R1_ERROR (1 << 19) /* erx, c */
#define R1_UNDERRUN (1 << 18) /* ex, c */
#define R1_OVERRUN (1 << 17) /* ex, c */
#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */
#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */
#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */
#define R1_ERASE_RESET (1 << 13) /* sr, c */
#define R1_STATUS(x) (x & 0xFFFFE000)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_APP_CMD (1 << 5) /* sr, c */
#define R1_SPI_IDLE (1 << 0)
#define R1_SPI_ERASE_RESET (1 << 1)
#define R1_SPI_ILLEGAL_COMMAND (1 << 2)
#define R1_SPI_COM_CRC (1 << 3)
#define R1_SPI_ERASE_SEQ (1 << 4)
#define R1_SPI_ADDRESS (1 << 5)
#define R1_SPI_PARAMETER (1 << 6)
/* R1 bit 7 is always zero */
#define R2_SPI_CARD_LOCKED (1 << 8)
#define R2_SPI_WP_ERASE_SKIP (1 << 9) /* or lock/unlock fail */
#define R2_SPI_LOCK_UNLOCK_FAIL R2_SPI_WP_ERASE_SKIP
#define R2_SPI_ERROR (1 << 10)
#define R2_SPI_CC_ERROR (1 << 11)
#define R2_SPI_CARD_ECC_ERROR (1 << 12)
#define R2_SPI_WP_VIOLATION (1 << 13)
#define R2_SPI_ERASE_PARAM (1 << 14)
#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */
#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE
#define CARD_BUSY 0x80000000 /* Card Power up status bit */
/* R5 response bits */
#define R5_COM_CRC_ERROR (1 << 15)
#define R5_ILLEGAL_COMMAND (1 << 14)
#define R5_ERROR (1 << 11)
#define R5_FUNCTION_NUMBER (1 << 9)
#define R5_OUT_OF_RANGE (1 << 8)
#define R5_STATUS(x) (x & 0xCB00)
#define R5_IO_CURRENT_STATE(x) ((x & 0x3000) >> 12)
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
rt_inline rt_uint32_t __rt_fls(rt_uint32_t val)
{
rt_uint32_t bit = 32;
if (!val)
return 0;
if (!(val & 0xffff0000u))
{
val <<= 16;
bit -= 16;
}
if (!(val & 0xff000000u))
{
val <<= 8;
bit -= 8;
}
if (!(val & 0xf0000000u))
{
val <<= 4;
bit -= 4;
}
if (!(val & 0xc0000000u))
{
val <<= 2;
bit -= 2;
}
if (!(val & 0x80000000u))
{
bit -= 1;
}
return bit;
}
#define MMCSD_HOST_PLUGED 0
#define MMCSD_HOST_UNPLUGED 1
rt_int32_t mmcsd_excute_tuning(struct rt_mmcsd_card *card);
int mmcsd_wait_cd_changed(rt_int32_t timeout);
void mmcsd_host_lock(struct rt_mmcsd_host *host);
void mmcsd_host_unlock(struct rt_mmcsd_host *host);
void mmcsd_req_complete(struct rt_mmcsd_host *host);
void mmcsd_send_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req);
rt_int32_t mmcsd_send_cmd(struct rt_mmcsd_host *host, struct rt_mmcsd_cmd *cmd, int retries);
rt_int32_t mmcsd_go_idle(struct rt_mmcsd_host *host);
rt_int32_t mmcsd_spi_read_ocr(struct rt_mmcsd_host *host, rt_int32_t high_capacity, rt_uint32_t *ocr);
rt_int32_t mmcsd_all_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid);
rt_int32_t mmcsd_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid);
rt_int32_t mmcsd_get_csd(struct rt_mmcsd_card *card, rt_uint32_t *csd);
rt_int32_t mmcsd_select_card(struct rt_mmcsd_card *card);
rt_int32_t mmcsd_deselect_cards(struct rt_mmcsd_card *host);
rt_int32_t mmcsd_spi_use_crc(struct rt_mmcsd_host *host, rt_int32_t use_crc);
void mmcsd_set_chip_select(struct rt_mmcsd_host *host, rt_int32_t mode);
void mmcsd_set_clock(struct rt_mmcsd_host *host, rt_uint32_t clk);
void mmcsd_set_bus_mode(struct rt_mmcsd_host *host, rt_uint32_t mode);
void mmcsd_set_bus_width(struct rt_mmcsd_host *host, rt_uint32_t width);
void mmcsd_set_timing(struct rt_mmcsd_host *host, rt_uint32_t timing);
void mmcsd_set_data_timeout(struct rt_mmcsd_data *data, const struct rt_mmcsd_card *card);
rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr);
void mmcsd_change(struct rt_mmcsd_host *host);
void mmcsd_detect(void *param);
void mmcsd_host_init(struct rt_mmcsd_host *host);
struct rt_mmcsd_host *mmcsd_alloc_host(void);
void mmcsd_free_host(struct rt_mmcsd_host *host);
int rt_mmcsd_core_init(void);
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card);
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card);
#ifdef __cplusplus
}
#endif
#endif
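
A minimal sketch of issuing a single command through this layer, here CMD13 (SEND_STATUS) with an R1 response. It assumes `card` is an initialized rt_mmcsd_card whose `host` and `rca` fields were populated by the core during card init, and that the include paths below are correct for the build.

#include <rtthread.h>
#include <drivers/mmcsd_core.h> /* assumed include path */
#include <drivers/mmcsd_card.h>

static rt_int32_t demo_send_status(struct rt_mmcsd_card *card, rt_uint32_t *status)
{
    struct rt_mmcsd_cmd cmd = {0};
    rt_int32_t err;

    cmd.cmd_code = 13;                 /* SEND_STATUS */
    cmd.arg      = card->rca << 16;    /* RCA goes in the upper 16 bits of the argument */
    cmd.flags    = RESP_R1 | CMD_AC;

    mmcsd_host_lock(card->host);
    err = mmcsd_send_cmd(card->host, &cmd, 3); /* retry up to 3 times */
    mmcsd_host_unlock(card->host);

    if (err == 0 && status != RT_NULL)
    {
        *status = cmd.resp[0];         /* R1 card status word */
    }

    return err;
}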

View File

@@ -0,0 +1,328 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-01-20 Bernard the first version
* 2017-10-20 ZYH add mode open drain and input pull down
*/
#ifndef DEV_PIN_H__
#define DEV_PIN_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup Pin Pin
*
* @brief Pin driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include <rtdevice.h>
*
*
* #ifndef BEEP_PIN_NUM
* #define BEEP_PIN_NUM 35 // PB0
* #endif
* #ifndef KEY0_PIN_NUM
* #define KEY0_PIN_NUM 55 // PD8
* #endif
* #ifndef KEY1_PIN_NUM
* #define KEY1_PIN_NUM 56 // PD9
* #endif
*
* void beep_on(void *args)
* {
* rt_kprintf("turn on beep!\n");
*
* rt_pin_write(BEEP_PIN_NUM, PIN_HIGH);
* }
*
* void beep_off(void *args)
* {
* rt_kprintf("turn off beep!\n");
*
* rt_pin_write(BEEP_PIN_NUM, PIN_LOW);
* }
*
* static void pin_beep_sample(void)
* {
* rt_pin_mode(BEEP_PIN_NUM, PIN_MODE_OUTPUT);
* rt_pin_write(BEEP_PIN_NUM, PIN_LOW);
*
* rt_pin_mode(KEY0_PIN_NUM, PIN_MODE_INPUT_PULLUP);
* rt_pin_attach_irq(KEY0_PIN_NUM, PIN_IRQ_MODE_FALLING, beep_on, RT_NULL);
* rt_pin_irq_enable(KEY0_PIN_NUM, PIN_IRQ_ENABLE);
*
*
* rt_pin_mode(KEY1_PIN_NUM, PIN_MODE_INPUT_PULLUP);
* rt_pin_attach_irq(KEY1_PIN_NUM, PIN_IRQ_MODE_FALLING, beep_off, RT_NULL);
* rt_pin_irq_enable(KEY1_PIN_NUM, PIN_IRQ_ENABLE);
* }
*
* MSH_CMD_EXPORT(pin_beep_sample, pin beep sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup Pin
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifdef RT_USING_DM
#include <drivers/pic.h>
struct rt_pin_irqchip
{
struct rt_pic parent;
int irq;
rt_base_t pin_range[2];
};
struct rt_pin_irq_hdr;
#endif /* RT_USING_DM */
/**
* @brief pin device structure
*/
struct rt_device_pin
{
struct rt_device parent;
#ifdef RT_USING_DM
    /* MUST keep this member immediately after parent */
struct rt_pin_irqchip irqchip;
/* Fill by DM */
rt_base_t pin_start;
rt_size_t pin_nr;
rt_list_t list;
struct rt_pin_irq_hdr *legacy_isr;
#endif /* RT_USING_DM */
const struct rt_pin_ops *ops;
};
#define PIN_NONE (-1)
#define PIN_LOW 0x00 /*!< low level */
#define PIN_HIGH 0x01 /*!< high level */
#define PIN_MODE_OUTPUT 0x00 /*!< output mode */
#define PIN_MODE_INPUT 0x01 /*!< input mode */
#define PIN_MODE_INPUT_PULLUP 0x02 /*!< input mode with pull-up */
#define PIN_MODE_INPUT_PULLDOWN 0x03 /*!< input mode with pull-down */
#define PIN_MODE_OUTPUT_OD 0x04 /*!< output mode with open-drain */
#ifdef RT_USING_PINCTRL
enum
{
PIN_CONFIG_BIAS_BUS_HOLD,
PIN_CONFIG_BIAS_DISABLE,
PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
PIN_CONFIG_BIAS_PULL_DOWN,
PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
PIN_CONFIG_BIAS_PULL_UP,
PIN_CONFIG_DRIVE_OPEN_DRAIN,
PIN_CONFIG_DRIVE_OPEN_SOURCE,
PIN_CONFIG_DRIVE_PUSH_PULL,
PIN_CONFIG_DRIVE_STRENGTH,
PIN_CONFIG_DRIVE_STRENGTH_UA,
PIN_CONFIG_INPUT_DEBOUNCE,
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SKEW_DELAY,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7f,
PIN_CONFIG_MAX = 0xff,
};
#endif /* RT_USING_PINCTRL */
#define PIN_IRQ_MODE_RISING 0x00 /*!< rising edge trigger */
#define PIN_IRQ_MODE_FALLING 0x01 /*!< falling edge trigger */
#define PIN_IRQ_MODE_RISING_FALLING 0x02 /*!< rising and falling edge trigger */
#define PIN_IRQ_MODE_HIGH_LEVEL 0x03 /*!< high level trigger */
#define PIN_IRQ_MODE_LOW_LEVEL 0x04 /*!< low level trigger */
#define PIN_IRQ_DISABLE 0x00 /*!< disable irq */
#define PIN_IRQ_ENABLE 0x01 /*!< enable irq */
#define PIN_IRQ_PIN_NONE PIN_NONE /*!< no pin irq */
/**
* @brief pin mode structure
*/
struct rt_device_pin_mode
{
rt_base_t pin;
rt_uint8_t mode; /* e.g. PIN_MODE_OUTPUT */
};
/**
* @brief pin value structure
*/
struct rt_device_pin_value
{
rt_base_t pin;
rt_uint8_t value; /* PIN_LOW or PIN_HIGH */
};
/**
* @brief pin irq structure
*/
struct rt_pin_irq_hdr
{
rt_base_t pin;
rt_uint8_t mode; /* e.g. PIN_IRQ_MODE_RISING */
void (*hdr)(void *args);
void *args;
};
#ifdef RT_USING_PINCTRL
/**
* @brief pin control configure structure
*/
struct rt_pin_ctrl_conf_params
{
const char *propname;
rt_uint32_t param;
rt_uint32_t default_value;
};
#endif /* RT_USING_PINCTRL */
/**
* @brief pin device operations
*/
struct rt_pin_ops
{
void (*pin_mode)(struct rt_device *device, rt_base_t pin, rt_uint8_t mode);
void (*pin_write)(struct rt_device *device, rt_base_t pin, rt_uint8_t value);
rt_ssize_t (*pin_read)(struct rt_device *device, rt_base_t pin);
rt_err_t (*pin_attach_irq)(struct rt_device *device, rt_base_t pin,
rt_uint8_t mode, void (*hdr)(void *args), void *args);
rt_err_t (*pin_detach_irq)(struct rt_device *device, rt_base_t pin);
rt_err_t (*pin_irq_enable)(struct rt_device *device, rt_base_t pin, rt_uint8_t enabled);
rt_base_t (*pin_get)(const char *name);
rt_err_t (*pin_debounce)(struct rt_device *device, rt_base_t pin, rt_uint32_t debounce);
#ifdef RT_USING_DM
rt_err_t (*pin_irq_mode)(struct rt_device *device, rt_base_t pin, rt_uint8_t mode);
rt_ssize_t (*pin_parse)(struct rt_device *device, struct rt_ofw_cell_args *args, rt_uint32_t *flags);
#endif
#ifdef RT_USING_PINCTRL
rt_err_t (*pin_ctrl_confs_apply)(struct rt_device *device, void *fw_conf_np);
#endif /* RT_USING_PINCTRL */
};
/**
* @brief register a pin device
* @param name the name of pin device
* @param ops the operations of pin device
* @param user_data the user data of pin device
* @return int error code
*/
int rt_device_pin_register(const char *name, const struct rt_pin_ops *ops, void *user_data);
/**
* @brief set pin mode
* @param pin the pin number
* @param mode the pin mode
*/
void rt_pin_mode(rt_base_t pin, rt_uint8_t mode);
/**
* @brief write pin value
* @param pin the pin number
* @param value the pin value
*/
void rt_pin_write(rt_base_t pin, rt_ssize_t value);
/**
* @brief read pin value
* @param pin the pin number
* @return rt_ssize_t the pin value
*/
rt_ssize_t rt_pin_read(rt_base_t pin);
/**
* @brief get pin number by name
* @param name the pin name
* @return rt_base_t the pin number
*/
rt_base_t rt_pin_get(const char *name);
/**
* @brief bind the pin interrupt callback function
* @param pin the pin number
* @param mode the irq mode
* @param hdr the irq callback function
* @param args the argument of the callback function
* @return rt_err_t error code
*/
rt_err_t rt_pin_attach_irq(rt_base_t pin, rt_uint8_t mode,
void (*hdr)(void *args), void *args);
/**
* @brief detach the pin interrupt callback function
* @param pin the pin number
* @return rt_err_t error code
*/
rt_err_t rt_pin_detach_irq(rt_base_t pin);
/**
* @brief enable or disable the pin interrupt
* @param pin the pin number
* @param enabled PIN_IRQ_ENABLE or PIN_IRQ_DISABLE
* @return rt_err_t error code
*/
rt_err_t rt_pin_irq_enable(rt_base_t pin, rt_uint8_t enabled);
/**
* @brief set the pin's debounce time
* @param pin the pin number
* @param debounce time
* @return rt_err_t error code
*/
rt_err_t rt_pin_debounce(rt_base_t pin, rt_uint32_t debounce);
#ifdef RT_USING_DM
rt_ssize_t rt_pin_get_named_pin(struct rt_device *dev, const char *propname, int index,
rt_uint8_t *out_mode, rt_uint8_t *out_value);
rt_ssize_t rt_pin_get_named_pin_count(struct rt_device *dev, const char *propname);
#ifdef RT_USING_OFW
rt_ssize_t rt_ofw_get_named_pin(struct rt_ofw_node *np, const char *propname, int index,
rt_uint8_t *out_mode, rt_uint8_t *out_value);
rt_ssize_t rt_ofw_get_named_pin_count(struct rt_ofw_node *np, const char *propname);
#endif
#endif /* RT_USING_DM */
#ifdef RT_USING_PINCTRL
rt_ssize_t rt_pin_ctrl_confs_lookup(struct rt_device *device, const char *name);
rt_err_t rt_pin_ctrl_confs_apply(struct rt_device *device, int index);
rt_err_t rt_pin_ctrl_confs_apply_by_name(struct rt_device *device, const char *name);
#endif /* RT_USING_PINCTRL */
#ifdef __cplusplus
}
#endif
/*! @}*/
#endif
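
A skeleton of the driver side: fill an rt_pin_ops table and hand it to rt_device_pin_register(). Everything hardware-specific is a placeholder left as comments; the device name "pin" follows the usual convention but is an assumption here.

#include <rtthread.h>
#include <rtdevice.h>

static void demo_pin_mode(struct rt_device *device, rt_base_t pin, rt_uint8_t mode)
{
    /* program the direction / pull registers of `pin` according to `mode` */
}

static void demo_pin_write(struct rt_device *device, rt_base_t pin, rt_uint8_t value)
{
    /* set or clear the output data bit of `pin` (PIN_HIGH / PIN_LOW) */
}

static rt_ssize_t demo_pin_read(struct rt_device *device, rt_base_t pin)
{
    /* return the input level of `pin` */
    return PIN_LOW;
}

static const struct rt_pin_ops demo_pin_ops =
{
    .pin_mode  = demo_pin_mode,
    .pin_write = demo_pin_write,
    .pin_read  = demo_pin_read,
    /* irq / get / debounce hooks are left out of this sketch */
};

static int demo_pin_init(void)
{
    return rt_device_pin_register("pin", &demo_pin_ops, RT_NULL);
}
INIT_BOARD_EXPORT(demo_pin_init);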

View File

@@ -0,0 +1,209 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-05-07 aozima the first version
* 2022-09-24 yuqi add phase and dead time configuration
*/
#ifndef __DEV_PWM_H__
#define __DEV_PWM_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup PWM PWM
*
* @brief PWM driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include <rtdevice.h>
*
 * #define PWM_DEV_NAME        "pwm3"  // PWM device name
 * #define PWM_DEV_CHANNEL     4       // PWM channel
 *
 * struct rt_device_pwm *pwm_dev;      // PWM device handle
*
* static int pwm_led_sample(int argc, char *argv[])
* {
* rt_uint32_t period, pulse, dir;
*
 *     period = 500000;    // period of 0.5 ms, in nanoseconds (ns)
 *     dir = 1;            // direction in which the PWM pulse width changes
 *     pulse = 0;          // PWM pulse width, in nanoseconds (ns)
 *
 *     // find the device
* pwm_dev = (struct rt_device_pwm *)rt_device_find(PWM_DEV_NAME);
* if (pwm_dev == RT_NULL)
* {
* rt_kprintf("pwm sample run failed! can't find %s device!\n", PWM_DEV_NAME);
* return -RT_ERROR;
* }
*
 *     // set the default PWM period and pulse width
 *     rt_pwm_set(pwm_dev, PWM_DEV_CHANNEL, period, pulse);
 *     // enable the device
* rt_pwm_enable(pwm_dev, PWM_DEV_CHANNEL);
*
* while (1)
* {
* rt_thread_mdelay(50);
* if (dir)
* {
 *             pulse += 5000;      // starting from 0, increase by 5000 ns each time
* }
* else
* {
 *             pulse -= 5000;      // starting from the maximum, decrease by 5000 ns each time
* }
* if (pulse >= period)
* {
* dir = 0;
* }
* if (0 == pulse)
* {
* dir = 1;
* }
*
 *         // set the PWM period and pulse width
* rt_pwm_set(pwm_dev, PWM_DEV_CHANNEL, period, pulse);
* }
* }
*
* MSH_CMD_EXPORT(pwm_led_sample, pwm sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup PWM
* @{
*/
#define PWM_CMD_ENABLE (RT_DEVICE_CTRL_BASE(PWM) + 0)
#define PWM_CMD_DISABLE (RT_DEVICE_CTRL_BASE(PWM) + 1)
#define PWM_CMD_SET (RT_DEVICE_CTRL_BASE(PWM) + 2)
#define PWM_CMD_GET (RT_DEVICE_CTRL_BASE(PWM) + 3)
#define PWMN_CMD_ENABLE (RT_DEVICE_CTRL_BASE(PWM) + 4)
#define PWMN_CMD_DISABLE (RT_DEVICE_CTRL_BASE(PWM) + 5)
#define PWM_CMD_SET_PERIOD (RT_DEVICE_CTRL_BASE(PWM) + 6)
#define PWM_CMD_SET_PULSE (RT_DEVICE_CTRL_BASE(PWM) + 7)
#define PWM_CMD_SET_DEAD_TIME (RT_DEVICE_CTRL_BASE(PWM) + 8)
#define PWM_CMD_SET_PHASE (RT_DEVICE_CTRL_BASE(PWM) + 9)
#define PWM_CMD_ENABLE_IRQ (RT_DEVICE_CTRL_BASE(PWM) + 10)
#define PWM_CMD_DISABLE_IRQ (RT_DEVICE_CTRL_BASE(PWM) + 11)
/**
* @brief PWM configuration
*/
struct rt_pwm_configuration
{
rt_uint32_t channel; /* 0 ~ n or 0 ~ -n, which depends on specific MCU requirements */
    rt_uint32_t period;  /* unit: ns; range 1 ns ~ 4.29 s (1 GHz ~ 0.23 Hz) */
    rt_uint32_t pulse;   /* unit: ns (pulse <= period) */
    rt_uint32_t dead_time;  /* unit: ns */
    rt_uint32_t phase;  /* unit: degree, 0~360, the phase of the PWM output */
    /*
     * RT_TRUE  : The channel of pwm is complementary.
     * RT_FALSE : The channel of pwm is normal.
     */
    rt_bool_t  complementary;
};
struct rt_device_pwm;
/**
* @brief PWM operations
*/
struct rt_pwm_ops
{
rt_err_t (*control)(struct rt_device_pwm *device, int cmd, void *arg);
};
/**
* @brief PWM device
*/
struct rt_device_pwm
{
struct rt_device parent;
const struct rt_pwm_ops *ops;
};
/**
* @brief register a PWM device
* @param device the PWM device
* @param name the name of PWM device
* @param ops the operations of PWM device
* @param user_data the user data of PWM device
* @return rt_err_t error code
*/
rt_err_t rt_device_pwm_register(struct rt_device_pwm *device, const char *name, const struct rt_pwm_ops *ops, const void *user_data);
/**
* @brief enable the PWM channel
* @param device the PWM device
* @param channel the channel of PWM
* @return rt_err_t error code
*/
rt_err_t rt_pwm_enable(struct rt_device_pwm *device, int channel);
/**
* @brief disable the PWM channel
* @param device the PWM device
* @param channel the channel of PWM
* @return rt_err_t error code
*/
rt_err_t rt_pwm_disable(struct rt_device_pwm *device, int channel);
/**
* @brief set the PWM channel
* @param device the PWM device
* @param channel the channel of PWM
* @param period the period of PWM
* @param pulse the pulse of PWM
* @return rt_err_t error code
*/
rt_err_t rt_pwm_set(struct rt_device_pwm *device, int channel, rt_uint32_t period, rt_uint32_t pulse);
/**
* @brief set the PWM channel period
* @param device the PWM device
* @param channel the channel of PWM
* @param period the period of PWM
* @return rt_err_t error code
*/
rt_err_t rt_pwm_set_period(struct rt_device_pwm *device, int channel, rt_uint32_t period);
/**
* @brief set the PWM channel pulse
* @param device the PWM device
* @param channel the channel of PWM
 * @param pulse the pulse of PWM
* @return rt_err_t error code
*/
rt_err_t rt_pwm_set_pulse(struct rt_device_pwm *device, int channel, rt_uint32_t pulse);
/**
* @brief set the dead zone time of PWM
* @param device the PWM device
* @param channel the channel of PWM
* @param dead_time dead zone time
* @return rt_err_t error code
*/
rt_err_t rt_pwm_set_dead_time(struct rt_device_pwm *device, int channel, rt_uint32_t dead_time);
/**
* @brief set the phase of PWM
* @param device the PWM device
* @param channel the channel of PWM
* @param phase phase
* @return rt_err_t error code
*/
rt_err_t rt_pwm_set_phase(struct rt_device_pwm *device, int channel, rt_uint32_t phase);
/*! @}*/
#endif /* __DEV_PWM_H__ */
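
Building on the LED example above, this sketch exercises the dead-time and complementary-output controls. It assumes a device named "pwm1" whose channel 1 has an N (complementary) output, and that PWMN_CMD_ENABLE takes a struct rt_pwm_configuration selecting the channel.

#include <rtthread.h>
#include <rtdevice.h>

static int pwm_complementary_sample(void)
{
    struct rt_device_pwm *pwm;
    struct rt_pwm_configuration ncfg = {0};
    int channel = 1;

    pwm = (struct rt_device_pwm *)rt_device_find("pwm1");
    if (pwm == RT_NULL)
        return -RT_ERROR;

    rt_pwm_set(pwm, channel, 100000, 50000);  /* 10 kHz period, 50% duty, in ns */
    rt_pwm_set_dead_time(pwm, channel, 500);  /* 500 ns dead zone between the pair */

    rt_pwm_enable(pwm, channel);              /* main output */

    ncfg.channel = channel;                   /* complementary (N) output of the same channel */
    rt_device_control(&pwm->parent, PWMN_CMD_ENABLE, &ncfg);

    return RT_EOK;
}
MSH_CMD_EXPORT(pwm_complementary_sample, pwm complementary output sample);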

View File

@@ -0,0 +1,194 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-10-10 aozima first version.
* 2021-06-11 iysheng implement RTC framework V2.0
* 2021-07-30 Meco Man move rtc_core.h to rtc.h
* 2022-04-05 tyx add timestamp function
*/
#ifndef __DEV_RTC_H__
#define __DEV_RTC_H__
#include <rtdef.h>
#include <sys/time.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup RTC RTC
*
* @brief RTC driver api
*
* <b>Example</b>
* @code {.c}
*
* #include <rtthread.h>
* #include <rtdevice.h>
*
* #define RTC_NAME "rtc"
*
* static int rtc_sample(int argc, char *argv[])
* {
* rt_err_t ret = RT_EOK;
* time_t now;
* rt_device_t device = RT_NULL;
*
* device = rt_device_find(RTC_NAME);
* if (!device)
* {
* LOG_E("find %s failed!", RTC_NAME);
 *         return -RT_ERROR;
* }
*
* if(rt_device_open(device, 0) != RT_EOK)
* {
* LOG_E("open %s failed!", RTC_NAME);
 *         return -RT_ERROR;
* }
*
* ret = set_date(2018, 12, 3);
* if (ret != RT_EOK)
* {
* rt_kprintf("set RTC date failed\n");
* return ret;
* }
*
* ret = set_time(11, 15, 50);
* if (ret != RT_EOK)
* {
* rt_kprintf("set RTC time failed\n");
* return ret;
* }
*
* rt_thread_mdelay(3000);
*
* now = time(RT_NULL);
* rt_kprintf("%s\n", ctime(&now));
*
* return ret;
* }
* MSH_CMD_EXPORT(rtc_sample, rtc sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup RTC
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#define RT_DEVICE_CTRL_RTC_GET_TIME (RT_DEVICE_CTRL_BASE(RTC) + 0x01) /**< get second time */
#define RT_DEVICE_CTRL_RTC_SET_TIME (RT_DEVICE_CTRL_BASE(RTC) + 0x02) /**< set second time */
#define RT_DEVICE_CTRL_RTC_GET_TIMEVAL (RT_DEVICE_CTRL_BASE(RTC) + 0x03) /**< get timeval for gettimeofday */
#define RT_DEVICE_CTRL_RTC_SET_TIMEVAL (RT_DEVICE_CTRL_BASE(RTC) + 0x04) /**< set timeval for gettimeofday */
#define RT_DEVICE_CTRL_RTC_GET_ALARM (RT_DEVICE_CTRL_BASE(RTC) + 0x05) /**< get alarm */
#define RT_DEVICE_CTRL_RTC_SET_ALARM (RT_DEVICE_CTRL_BASE(RTC) + 0x06) /**< set alarm */
#define RT_DEVICE_CTRL_RTC_GET_TIMESPEC (RT_DEVICE_CTRL_BASE(RTC) + 0x07) /**< get timespec for clock_gettime */
#define RT_DEVICE_CTRL_RTC_SET_TIMESPEC (RT_DEVICE_CTRL_BASE(RTC) + 0x08) /**< set timespec for clock_settime */
#define RT_DEVICE_CTRL_RTC_GET_TIMERES (RT_DEVICE_CTRL_BASE(RTC) + 0x09) /**< get resolution for clock_getres */
/**
* @brief RTC alarm structure
*/
struct rt_rtc_wkalarm
{
rt_bool_t enable; /* 0 = alarm disabled, 1 = alarm enabled */
rt_int32_t tm_sec; /* alarm at tm_sec */
rt_int32_t tm_min; /* alarm at tm_min */
rt_int32_t tm_hour; /* alarm at tm_hour */
rt_int32_t tm_mday; /* alarm at tm_mday */
rt_int32_t tm_mon; /* alarm at tm_mon */
rt_int32_t tm_year; /* alarm at tm_year */
};
/**
* @brief RTC operations
*/
struct rt_rtc_ops
{
rt_err_t (*init)(void);
rt_err_t (*get_secs)(time_t *sec);
rt_err_t (*set_secs)(time_t *sec);
rt_err_t (*get_alarm)(struct rt_rtc_wkalarm *alarm);
rt_err_t (*set_alarm)(struct rt_rtc_wkalarm *alarm);
rt_err_t (*get_timeval)(struct timeval *tv);
rt_err_t (*set_timeval)(struct timeval *tv);
};
/**
* @brief RTC device structure
*/
typedef struct rt_rtc_device
{
struct rt_device parent;
const struct rt_rtc_ops *ops;
} rt_rtc_dev_t;
/**
* @brief Register a RTC device
*
* @param rtc RTC device
* @param name RTC device name
* @param flag RTC device flag
* @param data RTC device data
* @return rt_err_t error code
*/
rt_err_t rt_hw_rtc_register(rt_rtc_dev_t *rtc,
const char *name,
rt_uint32_t flag,
void *data);
/**
* @brief set date
*
* @param year year
* @param month month
* @param day day
* @return rt_err_t error code
*/
rt_err_t set_date(rt_uint32_t year, rt_uint32_t month, rt_uint32_t day);
/**
* @brief set time
*
* @param hour hour
* @param minute minute
* @param second second
* @return rt_err_t error code
*/
rt_err_t set_time(rt_uint32_t hour, rt_uint32_t minute, rt_uint32_t second);
/**
* @brief set timestamp
*
* @param timestamp A pointer to time
* @return rt_err_t error code
*/
rt_err_t set_timestamp(time_t timestamp);
/**
* @brief get timestamp
*
 * @param timestamp a pointer used to return the time
* @return rt_err_t error code
*/
rt_err_t get_timestamp(time_t *timestamp);
#ifdef RT_USING_SYSTEM_WORKQUEUE
rt_err_t rt_soft_rtc_sync(void);
rt_err_t rt_soft_rtc_set_source(const char *name);
#endif
#ifdef __cplusplus
}
#endif
/*! @}*/
#endif /* __DEV_RTC_H__ */
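
A short sketch of programming a wake-up alarm through the generic control interface. It assumes the registered device is named "rtc" and that its driver implements the set_alarm hook.

#include <rtthread.h>
#include <rtdevice.h>

static int rtc_alarm_sample(void)
{
    rt_device_t rtc = rt_device_find("rtc");
    struct rt_rtc_wkalarm alarm = {0};

    if (rtc == RT_NULL || rt_device_open(rtc, 0) != RT_EOK)
        return -RT_ERROR;

    alarm.enable  = RT_TRUE;  /* arm the alarm */
    alarm.tm_hour = 7;
    alarm.tm_min  = 30;
    alarm.tm_sec  = 0;

    /* forwarded to the driver's ops->set_alarm() hook */
    return rt_device_control(rtc, RT_DEVICE_CTRL_RTC_SET_ALARM, &alarm);
}
MSH_CMD_EXPORT(rtc_alarm_sample, rtc alarm sample);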

View File

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2024-05-26 HPMicro Add UHS-I support
*/
#ifndef __DEV_SD_H__
#define __DEV_SD_H__
#include <rtthread.h>
#include <drivers/mmcsd_host.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* SWITCH_FUNC timing
*/
#define SD_SWITCH_FUNC_TIMING_DEFAULT 0
#define SD_SWITCH_FUNC_TIMING_HS 1
#define SD_SWITCH_FUNC_TIMING_SDR50 2
#define SD_SWITCH_FUNC_TIMING_SDR104 3
#define SD_SWITCH_FUNC_TIMING_DDR50 4
rt_err_t mmcsd_send_if_cond(struct rt_mmcsd_host *host, rt_uint32_t ocr);
rt_err_t mmcsd_send_app_op_cond(struct rt_mmcsd_host *host, rt_uint32_t ocr, rt_uint32_t *rocr);
rt_err_t mmcsd_get_card_addr(struct rt_mmcsd_host *host, rt_uint32_t *rca);
rt_int32_t mmcsd_get_scr(struct rt_mmcsd_card *card, rt_uint32_t *scr);
rt_int32_t init_sd(struct rt_mmcsd_host *host, rt_uint32_t ocr);
#ifdef __cplusplus
}
#endif
#endif
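
For orientation, a condensed sketch of the order in which these helpers are used when a card-detect change is handled: CMD8 (interface condition), then ACMD41 (operation condition), then the full init_sd() path. Locking and error handling are simplified, and the include paths plus the host->valid_ocr field are assumptions about the surrounding mmcsd core.

#include <rtthread.h>
#include <drivers/mmcsd_core.h> /* assumed path for mmcsd_host_lock()/unlock() */
#include <drivers/dev_sd.h>     /* assumed path for this header */

static void sd_probe_sketch(struct rt_mmcsd_host *host)
{
    rt_uint32_t ocr = 0;

    mmcsd_host_lock(host);

    /* CMD8 checks the interface condition against the host's voltage window,
     * ACMD41 retrieves the card OCR, then init_sd() finishes the CID/CSD/RCA
     * setup and registers the block device. */
    if (mmcsd_send_if_cond(host, host->valid_ocr) == RT_EOK &&
        mmcsd_send_app_op_cond(host, 0, &ocr) == RT_EOK)
    {
        init_sd(host, ocr);
    }

    mmcsd_host_unlock(host);
}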

View File

@@ -0,0 +1,231 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-15 weety first version
*/
#ifndef __DEV_SDIO_H__
#define __DEV_SDIO_H__
#include <rtthread.h>
#include <drivers/mmcsd_host.h>
#include <drivers/mmcsd_card.h>
#include <drivers/sdio_func_ids.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Card Common Control Registers (CCCR)
*/
#define SDIO_REG_CCCR_CCCR_REV 0x00
#define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */
#define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */
#define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */
#define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */
#define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */
#define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */
#define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */
#define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */
#define SDIO_REG_CCCR_SD_REV 0x01
#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */
#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */
#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */
#define SDIO_REG_CCCR_IO_EN 0x02
#define SDIO_REG_CCCR_IO_RDY 0x03
#define SDIO_REG_CCCR_INT_EN 0x04 /* Function/Master Interrupt Enable */
#define SDIO_REG_CCCR_INT_PEND 0x05 /* Function Interrupt Pending */
#define SDIO_REG_CCCR_IO_ABORT 0x06 /* function abort/card reset */
#define SDIO_REG_CCCR_BUS_IF 0x07 /* bus interface controls */
#define SDIO_BUS_WIDTH_1BIT 0x00
#define SDIO_BUS_WIDTH_4BIT 0x02
#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
#define SDIO_BUS_ASYNC_INT 0x20
#define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */
#define SDIO_REG_CCCR_CARD_CAPS 0x08
#define SDIO_CCCR_CAP_SDC 0x01 /* can do CMD52 while data transfer */
#define SDIO_CCCR_CAP_SMB 0x02 /* can do multi-block xfers (CMD53) */
#define SDIO_CCCR_CAP_SRW 0x04 /* supports read-wait protocol */
#define SDIO_CCCR_CAP_SBS 0x08 /* supports suspend/resume */
#define SDIO_CCCR_CAP_S4MI 0x10 /* interrupt during 4-bit CMD53 */
#define SDIO_CCCR_CAP_E4MI 0x20 /* enable ints during 4-bit CMD53 */
#define SDIO_CCCR_CAP_LSC 0x40 /* low speed card */
#define SDIO_CCCR_CAP_4BLS 0x80 /* 4 bit low speed card */
#define SDIO_REG_CCCR_CIS_PTR 0x09 /* common CIS pointer (3 bytes) */
/* Following 4 regs are valid only if SBS is set */
#define SDIO_REG_CCCR_BUS_SUSPEND 0x0c
#define SDIO_REG_CCCR_FUNC_SEL 0x0d
#define SDIO_REG_CCCR_EXEC_FLAG 0x0e
#define SDIO_REG_CCCR_READY_FLAG 0x0f
#define SDIO_REG_CCCR_FN0_BLKSIZE 0x10 /* 2bytes, 0x10~0x11 */
#define SDIO_REG_CCCR_POWER_CTRL 0x12
#define SDIO_POWER_SMPC 0x01 /* Supports Master Power Control */
#define SDIO_POWER_EMPC 0x02 /* Enable Master Power Control */
#define SDIO_REG_CCCR_SPEED 0x13
#define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */
#define SDIO_SPEED_EHS 0x02 /* Enable High-Speed mode */
/*
* Function Basic Registers (FBR)
*/
#define SDIO_REG_FBR_BASE(f) ((f) * 0x100) /* base of function f's FBRs */
#define SDIO_REG_FBR_STD_FUNC_IF 0x00
#define SDIO_FBR_SUPPORTS_CSA 0x40 /* supports Code Storage Area */
#define SDIO_FBR_ENABLE_CSA 0x80 /* enable Code Storage Area */
#define SDIO_REG_FBR_STD_IF_EXT 0x01
#define SDIO_REG_FBR_POWER 0x02
#define SDIO_FBR_POWER_SPS 0x01 /* Supports Power Selection */
#define SDIO_FBR_POWER_EPS 0x02 /* Enable (low) Power Selection */
#define SDIO_REG_FBR_CIS 0x09 /* CIS pointer (3 bytes) */
#define SDIO_REG_FBR_CSA 0x0C /* CSA pointer (3 bytes) */
#define SDIO_REG_FBR_CSA_DATA 0x0F
#define SDIO_REG_FBR_BLKSIZE 0x10 /* block size (2 bytes) */
/* SDIO CIS Tuple code */
#define CISTPL_NULL 0x00
#define CISTPL_CHECKSUM 0x10
#define CISTPL_VERS_1 0x15
#define CISTPL_ALTSTR 0x16
#define CISTPL_MANFID 0x20
#define CISTPL_FUNCID 0x21
#define CISTPL_FUNCE 0x22
#define CISTPL_SDIO_STD 0x91
#define CISTPL_SDIO_EXT 0x92
#define CISTPL_END 0xff
/* SDIO device id */
#define SDIO_ANY_FUNC_ID 0xff
#define SDIO_ANY_MAN_ID 0xffff
#define SDIO_ANY_PROD_ID 0xffff
struct rt_sdio_device_id
{
rt_uint8_t func_code;
rt_uint16_t manufacturer;
rt_uint16_t product;
};
struct rt_sdio_driver
{
char *name;
rt_int32_t (*probe)(struct rt_mmcsd_card *card);
rt_int32_t (*remove)(struct rt_mmcsd_card *card);
struct rt_sdio_device_id *id;
};
rt_int32_t sdio_io_send_op_cond(struct rt_mmcsd_host *host,
rt_uint32_t ocr,
rt_uint32_t *cmd5_resp);
rt_int32_t sdio_io_rw_direct(struct rt_mmcsd_card *card,
rt_int32_t rw,
rt_uint32_t fn,
rt_uint32_t reg_addr,
rt_uint8_t *pdata,
rt_uint8_t raw);
rt_int32_t sdio_io_rw_extended(struct rt_mmcsd_card *card,
rt_int32_t rw,
rt_uint32_t fn,
rt_uint32_t addr,
rt_int32_t op_code,
rt_uint8_t *buf,
rt_uint32_t blocks,
rt_uint32_t blksize);
rt_int32_t sdio_io_rw_extended_block(struct rt_sdio_function *func,
rt_int32_t rw,
rt_uint32_t addr,
rt_int32_t op_code,
rt_uint8_t *buf,
rt_uint32_t len);
rt_uint8_t sdio_io_readb(struct rt_sdio_function *func,
rt_uint32_t reg,
rt_int32_t *err);
rt_int32_t sdio_io_writeb(struct rt_sdio_function *func,
rt_uint32_t reg,
rt_uint8_t data);
rt_uint16_t sdio_io_readw(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_int32_t *err);
rt_int32_t sdio_io_writew(struct rt_sdio_function *func,
rt_uint16_t data,
rt_uint32_t addr);
rt_uint32_t sdio_io_readl(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_int32_t *err);
rt_int32_t sdio_io_writel(struct rt_sdio_function *func,
rt_uint32_t data,
rt_uint32_t addr);
rt_int32_t sdio_io_read_multi_fifo_b(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_uint8_t *buf,
rt_uint32_t len);
rt_int32_t sdio_io_write_multi_fifo_b(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_uint8_t *buf,
rt_uint32_t len);
rt_int32_t sdio_io_read_multi_incr_b(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_uint8_t *buf,
rt_uint32_t len);
rt_int32_t sdio_io_write_multi_incr_b(struct rt_sdio_function *func,
rt_uint32_t addr,
rt_uint8_t *buf,
rt_uint32_t len);
rt_int32_t init_sdio(struct rt_mmcsd_host *host, rt_uint32_t ocr);
rt_int32_t sdio_attach_irq(struct rt_sdio_function *func,
rt_sdio_irq_handler_t *handler);
rt_int32_t sdio_detach_irq(struct rt_sdio_function *func);
void sdio_irq_wakeup(struct rt_mmcsd_host *host);
rt_int32_t sdio_enable_func(struct rt_sdio_function *func);
rt_int32_t sdio_disable_func(struct rt_sdio_function *func);
void sdio_set_drvdata(struct rt_sdio_function *func, void *data);
void* sdio_get_drvdata(struct rt_sdio_function *func);
rt_int32_t sdio_set_block_size(struct rt_sdio_function *func,
rt_uint32_t blksize);
rt_int32_t sdio_register_driver(struct rt_sdio_driver *driver);
rt_int32_t sdio_unregister_driver(struct rt_sdio_driver *driver);
void rt_sdio_init(void);
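/*
 * A hedged driver-registration sketch built only from the interfaces above;
 * the id values, the probed register offset and the assumption that
 * function 1 is present are placeholders:
 *
 *   static struct rt_sdio_device_id demo_id = { SDIO_ANY_FUNC_ID, 0x1234, 0x5678 };
 *
 *   static rt_int32_t demo_probe(struct rt_mmcsd_card *card)
 *   {
 *       rt_int32_t err = 0;
 *       struct rt_sdio_function *func = card->sdio_function[1];
 *
 *       sdio_enable_func(func);                 // power up function 1
 *       sdio_set_block_size(func, 512);         // choose a CMD53 block size
 *       (void)sdio_io_readb(func, 0x00, &err);  // hypothetical vendor register read
 *       return err;
 *   }
 *
 *   static struct rt_sdio_driver demo_drv = { "demo_sdio", demo_probe, RT_NULL, &demo_id };
 *
 *   // sdio_register_driver(&demo_drv);
 */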
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,325 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-05-15 lgnq first version.
* 2012-05-28 bernard change interfaces
* 2013-02-20 bernard use RT_SERIAL_RB_BUFSZ to define
* the size of ring buffer.
*/
#ifndef __DEV_SERIAL_H__
#define __DEV_SERIAL_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup Serial Serial
*
* @brief Serial driver api
*
* <b>Example</b>
* @code {.c}
*
* #include <rtthread.h>
*
* #define SAMPLE_UART_NAME "uart2"
* static struct rt_semaphore rx_sem;
* static rt_device_t serial;
*
* static rt_err_t uart_input(rt_device_t dev, rt_size_t size)
* {
*
* rt_sem_release(&rx_sem);
*
* return RT_EOK;
* }
*
* static void serial_thread_entry(void *parameter)
* {
* char ch;
*
* while (1)
* {
*
* while (rt_device_read(serial, -1, &ch, 1) != 1)
* {
*
* rt_sem_take(&rx_sem, RT_WAITING_FOREVER);
* }
*
* ch = ch + 1;
* rt_device_write(serial, 0, &ch, 1);
* }
* }
*
* static int uart_sample(int argc, char *argv[])
* {
* rt_err_t ret = RT_EOK;
* char uart_name[RT_NAME_MAX];
* char str[] = "hello RT-Thread!\r\n";
*
* if (argc == 2)
* {
* rt_strncpy(uart_name, argv[1], RT_NAME_MAX);
* }
* else
* {
* rt_strncpy(uart_name, SAMPLE_UART_NAME, RT_NAME_MAX);
* }
*
*
* serial = rt_device_find(uart_name);
* if (!serial)
* {
* rt_kprintf("find %s failed!\n", uart_name);
* return -RT_ERROR;
* }
*
*
* rt_sem_init(&rx_sem, "rx_sem", 0, RT_IPC_FLAG_FIFO);
*
* rt_device_open(serial, RT_DEVICE_FLAG_INT_RX);
*
* rt_device_set_rx_indicate(serial, uart_input);
*
* rt_device_write(serial, 0, str, (sizeof(str) - 1));
*
*
* rt_thread_t thread = rt_thread_create("serial", serial_thread_entry, RT_NULL, 1024, 25, 10);
*
* if (thread != RT_NULL)
* {
* rt_thread_startup(thread);
* }
* else
* {
* ret = -RT_ERROR;
* }
*
* return ret;
* }
*
* MSH_CMD_EXPORT(uart_sample, uart device sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup Serial
* @{
*/
#define BAUD_RATE_2400 2400
#define BAUD_RATE_4800 4800
#define BAUD_RATE_9600 9600
#define BAUD_RATE_19200 19200
#define BAUD_RATE_38400 38400
#define BAUD_RATE_57600 57600
#define BAUD_RATE_115200 115200
#define BAUD_RATE_230400 230400
#define BAUD_RATE_460800 460800
#define BAUD_RATE_500000 500000
#define BAUD_RATE_576000 576000
#define BAUD_RATE_921600 921600
#define BAUD_RATE_1000000 1000000
#define BAUD_RATE_1152000 1152000
#define BAUD_RATE_1500000 1500000
#define BAUD_RATE_2000000 2000000
#define BAUD_RATE_2500000 2500000
#define BAUD_RATE_3000000 3000000
#define BAUD_RATE_3500000 3500000
#define BAUD_RATE_4000000 4000000
#define DATA_BITS_5 5
#define DATA_BITS_6 6
#define DATA_BITS_7 7
#define DATA_BITS_8 8
#define DATA_BITS_9 9
#define STOP_BITS_1 0
#define STOP_BITS_2 1
#define STOP_BITS_3 2
#define STOP_BITS_4 3
#ifdef _WIN32
#include <windows.h>
#else
#define PARITY_NONE 0
#define PARITY_ODD 1
#define PARITY_EVEN 2
#endif
#define BIT_ORDER_LSB 0
#define BIT_ORDER_MSB 1
#define NRZ_NORMAL 0 /* Non Return to Zero : normal mode */
#define NRZ_INVERTED 1 /* Non Return to Zero : inverted mode */
#ifndef RT_SERIAL_RB_BUFSZ
#define RT_SERIAL_RB_BUFSZ 64
#endif
#define RT_SERIAL_EVENT_RX_IND 0x01 /* Rx indication */
#define RT_SERIAL_EVENT_TX_DONE 0x02 /* Tx complete */
#define RT_SERIAL_EVENT_RX_DMADONE 0x03 /* Rx DMA transfer done */
#define RT_SERIAL_EVENT_TX_DMADONE 0x04 /* Tx DMA transfer done */
#define RT_SERIAL_EVENT_RX_TIMEOUT 0x05 /* Rx timeout */
#define RT_SERIAL_DMA_RX 0x01
#define RT_SERIAL_DMA_TX 0x02
#define RT_SERIAL_RX_INT 0x01
#define RT_SERIAL_TX_INT 0x02
#define RT_SERIAL_ERR_OVERRUN 0x01
#define RT_SERIAL_ERR_FRAMING 0x02
#define RT_SERIAL_ERR_PARITY 0x03
#define RT_SERIAL_TX_DATAQUEUE_SIZE 2048
#define RT_SERIAL_TX_DATAQUEUE_LWM 30
#define RT_SERIAL_FLOWCONTROL_CTSRTS 1
#define RT_SERIAL_FLOWCONTROL_NONE 0
/* Default config for serial_configure structure */
#define RT_SERIAL_CONFIG_DEFAULT \
{ \
BAUD_RATE_115200, /* 115200 bits/s */ \
DATA_BITS_8, /* 8 databits */ \
STOP_BITS_1, /* 1 stopbit */ \
PARITY_NONE, /* No parity */ \
BIT_ORDER_LSB, /* LSB first sent */ \
NRZ_NORMAL, /* Normal mode */ \
RT_SERIAL_RB_BUFSZ, /* Buffer size */ \
RT_SERIAL_FLOWCONTROL_NONE, /* Off flowcontrol */ \
0 \
}
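/**
 * A short sketch of seeding a configuration from this default and applying it,
 * assuming the generic RT_DEVICE_CTRL_CONFIG command from rtdef.h:
 * @code {.c}
 * static void demo_reconfigure(rt_device_t serial)
 * {
 *     struct serial_configure cfg = RT_SERIAL_CONFIG_DEFAULT;
 *
 *     cfg.baud_rate = BAUD_RATE_9600;   // override only what differs from the default
 *     rt_device_control(serial, RT_DEVICE_CTRL_CONFIG, &cfg);
 * }
 * @endcode
 */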
/**
 * @brief Hook function prototype invoked when the RX indication is raised
*
*/
typedef void (*rt_hw_serial_rxind_hookproto_t)(rt_device_t dev, rt_size_t size);
RT_OBJECT_HOOKLIST_DECLARE(rt_hw_serial_rxind_hookproto_t, rt_hw_serial_rxind);
struct serial_configure
{
rt_uint32_t baud_rate;
rt_uint32_t data_bits :4;
rt_uint32_t stop_bits :2;
rt_uint32_t parity :2;
rt_uint32_t bit_order :1;
rt_uint32_t invert :1;
rt_uint32_t bufsz :16;
rt_uint32_t flowcontrol :1;
rt_uint32_t reserved :5;
};
/*
* Serial FIFO mode
*/
struct rt_serial_rx_fifo
{
/* software fifo */
rt_uint8_t *buffer;
rt_uint16_t put_index, get_index;
rt_bool_t is_full;
};
struct rt_serial_tx_fifo
{
struct rt_completion completion;
};
/*
* Serial DMA mode
*/
struct rt_serial_rx_dma
{
rt_bool_t activated;
};
struct rt_serial_tx_dma
{
rt_bool_t activated;
struct rt_data_queue data_queue;
};
struct rt_serial_device
{
struct rt_device parent;
const struct rt_uart_ops *ops;
struct serial_configure config;
void *serial_rx;
void *serial_tx;
struct rt_spinlock spinlock;
#ifdef RT_USING_SERIAL_BYPASS
struct rt_serial_bypass* bypass;
#endif
struct rt_device_notify rx_notify;
};
typedef struct rt_serial_device rt_serial_t;
/**
* @brief Configure the serial device
*/
struct rt_uart_ops
{
rt_err_t (*configure)(struct rt_serial_device *serial, struct serial_configure *cfg);
rt_err_t (*control)(struct rt_serial_device *serial, int cmd, void *arg);
int (*putc)(struct rt_serial_device *serial, char c);
int (*getc)(struct rt_serial_device *serial);
rt_ssize_t (*dma_transmit)(struct rt_serial_device *serial, rt_uint8_t *buf, rt_size_t size, int direction);
};
/**
* @brief Serial interrupt service routine
* @param serial serial device
* @param event event mask
* @ingroup Serial
*/
void rt_hw_serial_isr(struct rt_serial_device *serial, int event);
/**
* @brief Register a serial device to device list
*
* @param serial serial device
* @param name device name
* @param flag device flag
* @param data device private data
* @return rt_err_t error code
* @note This function will register a serial device to system device list,
* and add a device object to system object list.
* @ingroup Serial
*/
rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
const char *name,
rt_uint32_t flag,
void *data);
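/**
 * A bare-bones BSP driver sketch for this interface; "uart9" and the empty
 * ops bodies are placeholders for real hardware code:
 * @code {.c}
 * static struct rt_serial_device demo_serial;
 *
 * static rt_err_t demo_configure(struct rt_serial_device *serial, struct serial_configure *cfg)
 * {
 *     // program baud rate, data bits, parity ... into the UART registers
 *     return RT_EOK;
 * }
 *
 * static rt_err_t demo_control(struct rt_serial_device *serial, int cmd, void *arg)
 * {
 *     // enable or disable the RX interrupt according to cmd
 *     return RT_EOK;
 * }
 *
 * static int demo_putc(struct rt_serial_device *serial, char c) { return 1; }
 * static int demo_getc(struct rt_serial_device *serial) { return -1; }
 *
 * static const struct rt_uart_ops demo_ops =
 * {
 *     demo_configure, demo_control, demo_putc, demo_getc, RT_NULL
 * };
 *
 * int rt_hw_demo_uart_init(void)
 * {
 *     struct serial_configure cfg = RT_SERIAL_CONFIG_DEFAULT;
 *
 *     demo_serial.ops    = &demo_ops;
 *     demo_serial.config = cfg;
 *     return rt_hw_serial_register(&demo_serial, "uart9",
 *                                  RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX, RT_NULL);
 * }
 * @endcode
 */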
/**
* @brief register a serial device to system device list and add a device object to system object list
*
* @param serial serial device
* @return rt_err_t error code
*
* @ingroup Serial
*/
rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial);
/*! @}*/
#endif

View File

@@ -0,0 +1,355 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-06-01 KyleChan first version
*/
#ifndef __DEV_SERIAL_V2_H__
#define __DEV_SERIAL_V2_H__
#include <rtthread.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup Serial_v2 Serial v2
*
* @brief Serial v2 driver api
*
* <b>Example</b>
* @code {.c}
*
* #include <rtthread.h>
* #include <rtdevice.h>
*
* #define SAMPLE_UART_NAME "uart1"
*
* struct rx_msg
* {
* rt_device_t dev;
* rt_size_t size;
* };
* static rt_device_t serial;
* static struct rt_messagequeue rx_mq;
*
* static rt_err_t uart_input(rt_device_t dev, rt_size_t size)
* {
* struct rx_msg msg;
* rt_err_t result;
* msg.dev = dev;
* msg.size = size;
*
* result = rt_mq_send(&rx_mq, &msg, sizeof(msg));
* if (result == -RT_EFULL)
* {
* rt_kprintf("message queue full\n");
* }
* return result;
* }
*
* static void serial_thread_entry(void *parameter)
* {
* struct rx_msg msg;
* rt_err_t result;
* rt_uint32_t rx_length;
* static char rx_buffer[BSP_UART1_RX_BUFSIZE + 1];
*
* while (1)
* {
* rt_memset(&msg, 0, sizeof(msg));
* result = rt_mq_recv(&rx_mq, &msg, sizeof(msg), RT_WAITING_FOREVER);
* if (result > 0)
* {
* rx_length = rt_device_read(msg.dev, 0, rx_buffer, msg.size);
* rx_buffer[rx_length] = '\0';
* rt_device_write(serial, 0, rx_buffer, rx_length);
* rt_kprintf("%s\n",rx_buffer);
* }
* }
* }
*
* static int uart_dma_sample(int argc, char *argv[])
* {
* rt_err_t ret = RT_EOK;
* char uart_name[RT_NAME_MAX];
* static char msg_pool[256];
* char str[] = "hello RT-Thread!\r\n";
*
* if (argc == 2)
* {
* rt_strncpy(uart_name, argv[1], RT_NAME_MAX);
* }
* else
* {
* rt_strncpy(uart_name, SAMPLE_UART_NAME, RT_NAME_MAX);
* }
*
* serial = rt_device_find(uart_name);
* if (!serial)
* {
* rt_kprintf("find %s failed!\n", uart_name);
 * return -RT_ERROR;
* }
*
* rt_mq_init(&rx_mq, "rx_mq",
* msg_pool,
* sizeof(struct rx_msg),
* sizeof(msg_pool),
* RT_IPC_FLAG_FIFO);
*
* rt_device_open(serial, RT_DEVICE_FLAG_RX_NON_BLOCKING | RT_DEVICE_FLAG_TX_BLOCKING);
* rt_device_set_rx_indicate(serial, uart_input);
* rt_device_write(serial, 0, str, (sizeof(str) - 1));
*
* rt_thread_t thread = rt_thread_create("serial", serial_thread_entry, RT_NULL, 1024, 25, 10);
* if (thread != RT_NULL)
* {
* rt_thread_startup(thread);
* }
* else
* {
 * ret = -RT_ERROR;
* }
*
* return ret;
* }
* MSH_CMD_EXPORT(uart_dma_sample, uart device dma sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup Serial_v2
* @{
*/
#define BAUD_RATE_2400 2400
#define BAUD_RATE_4800 4800
#define BAUD_RATE_9600 9600
#define BAUD_RATE_19200 19200
#define BAUD_RATE_38400 38400
#define BAUD_RATE_57600 57600
#define BAUD_RATE_115200 115200
#define BAUD_RATE_230400 230400
#define BAUD_RATE_460800 460800
#define BAUD_RATE_500000 500000
#define BAUD_RATE_921600 921600
#define BAUD_RATE_2000000 2000000
#define BAUD_RATE_2500000 2500000
#define BAUD_RATE_3000000 3000000
#define DATA_BITS_5 5
#define DATA_BITS_6 6
#define DATA_BITS_7 7
#define DATA_BITS_8 8
#define DATA_BITS_9 9
#define STOP_BITS_1 0
#define STOP_BITS_2 1
#define STOP_BITS_3 2
#define STOP_BITS_4 3
#ifdef _WIN32
#include <windows.h>
#else
#define PARITY_NONE 0
#define PARITY_ODD 1
#define PARITY_EVEN 2
#endif
#define BIT_ORDER_LSB 0
#define BIT_ORDER_MSB 1
#define NRZ_NORMAL 0 /* Non Return to Zero : normal mode */
#define NRZ_INVERTED 1 /* Non Return to Zero : inverted mode */
#define RT_DEVICE_FLAG_RX_BLOCKING 0x1000
#define RT_DEVICE_FLAG_RX_NON_BLOCKING 0x2000
#define RT_DEVICE_FLAG_TX_BLOCKING 0x4000
#define RT_DEVICE_FLAG_TX_NON_BLOCKING 0x8000
#define RT_SERIAL_RX_BLOCKING RT_DEVICE_FLAG_RX_BLOCKING
#define RT_SERIAL_RX_NON_BLOCKING RT_DEVICE_FLAG_RX_NON_BLOCKING
#define RT_SERIAL_TX_BLOCKING RT_DEVICE_FLAG_TX_BLOCKING
#define RT_SERIAL_TX_NON_BLOCKING RT_DEVICE_FLAG_TX_NON_BLOCKING
#define RT_DEVICE_CHECK_OPTMODE 0x20
#define RT_SERIAL_EVENT_RX_IND 0x01 /* Rx indication */
#define RT_SERIAL_EVENT_TX_DONE 0x02 /* Tx complete */
#define RT_SERIAL_EVENT_RX_DMADONE 0x03 /* Rx DMA transfer done */
#define RT_SERIAL_EVENT_TX_DMADONE 0x04 /* Tx DMA transfer done */
#define RT_SERIAL_EVENT_RX_TIMEOUT 0x05 /* Rx timeout */
#define RT_SERIAL_ERR_OVERRUN 0x01
#define RT_SERIAL_ERR_FRAMING 0x02
#define RT_SERIAL_ERR_PARITY 0x03
#define RT_SERIAL_TX_DATAQUEUE_SIZE 2048
#define RT_SERIAL_TX_DATAQUEUE_LWM 30
#define RT_SERIAL_RX_MINBUFSZ 64
#define RT_SERIAL_TX_MINBUFSZ 64
#define RT_SERIAL_TX_BLOCKING_BUFFER 1
#define RT_SERIAL_TX_BLOCKING_NO_BUFFER 0
#define RT_SERIAL_FLOWCONTROL_CTSRTS 1
#define RT_SERIAL_FLOWCONTROL_NONE 0
/* Default config for serial_configure structure */
#define RT_SERIAL_CONFIG_DEFAULT \
{ \
BAUD_RATE_115200, /* 115200 bits/s */ \
DATA_BITS_8, /* 8 databits */ \
STOP_BITS_1, /* 1 stopbit */ \
PARITY_NONE, /* No parity */ \
BIT_ORDER_LSB, /* LSB first sent */ \
NRZ_NORMAL, /* Normal mode */ \
RT_SERIAL_RX_MINBUFSZ, /* rxBuf size */ \
RT_SERIAL_TX_MINBUFSZ, /* txBuf size */ \
RT_SERIAL_FLOWCONTROL_NONE, /* Off flowcontrol */ \
0 \
}
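/**
 * Unlike the v1 default, this configuration carries separate RX/TX software
 * buffer sizes; a typical override sketch, again assuming the generic
 * RT_DEVICE_CTRL_CONFIG command from rtdef.h:
 * @code {.c}
 * static void demo_reconfigure(rt_device_t serial)
 * {
 *     struct serial_configure cfg = RT_SERIAL_CONFIG_DEFAULT;
 *
 *     cfg.rx_bufsz = 256;   // enlarge the software RX FIFO
 *     cfg.tx_bufsz = 256;   // enlarge the software TX FIFO
 *     rt_device_control(serial, RT_DEVICE_CTRL_CONFIG, &cfg);
 * }
 * @endcode
 */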
/**
* @brief Serial receive indicate hook function type
*
*/
typedef void (*rt_hw_serial_rxind_hookproto_t)(rt_device_t dev, rt_size_t size);
RT_OBJECT_HOOKLIST_DECLARE(rt_hw_serial_rxind_hookproto_t, rt_hw_serial_rxind);
struct serial_configure
{
rt_uint32_t baud_rate;
rt_uint32_t data_bits :4;
rt_uint32_t stop_bits :2;
rt_uint32_t parity :2;
rt_uint32_t bit_order :1;
rt_uint32_t invert :1;
rt_uint32_t rx_bufsz :16;
rt_uint32_t tx_bufsz :16;
rt_uint32_t flowcontrol :1;
rt_uint32_t reserved :5;
};
/**
* @brief Serial Receive FIFO mode
*/
struct rt_serial_rx_fifo
{
struct rt_ringbuffer rb;
struct rt_completion rx_cpt;
rt_uint16_t rx_cpt_index;
/* software fifo */
rt_uint8_t buffer[];
};
/**
* @brief Serial Transmit FIFO mode
*
*/
struct rt_serial_tx_fifo
{
struct rt_ringbuffer rb;
rt_size_t put_size;
rt_bool_t activated;
struct rt_completion tx_cpt;
/* software fifo */
rt_uint8_t buffer[];
};
/**
* @brief serial device structure
*
*/
struct rt_serial_device
{
struct rt_device parent;
const struct rt_uart_ops *ops;
struct serial_configure config;
void *serial_rx;
void *serial_tx;
struct rt_device_notify rx_notify;
};
/**
* @brief uart device operations
*
*/
struct rt_uart_ops
{
rt_err_t (*configure)(struct rt_serial_device *serial,
struct serial_configure *cfg);
rt_err_t (*control)(struct rt_serial_device *serial,
int cmd,
void *arg);
int (*putc)(struct rt_serial_device *serial, char c);
int (*getc)(struct rt_serial_device *serial);
rt_ssize_t (*transmit)(struct rt_serial_device *serial,
rt_uint8_t *buf,
rt_size_t size,
rt_uint32_t tx_flag);
};
/**
* @brief Serial interrupt service routine
* @param serial serial device
* @param event event mask
* @ingroup Serial_v2
*/
void rt_hw_serial_isr(struct rt_serial_device *serial, int event);
/**
* @brief Register a serial device to device list
*
* @param serial serial device
* @param name device name
* @param flag device flag
* @param data device private data
* @return rt_err_t error code
* @note This function will register a serial device to system device list,
* and add a device object to system object list.
* @ingroup Serial_v2
*/
rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
const char *name,
rt_uint32_t flag,
void *data);
/**
* @brief register a serial device to system device list and add a device object to system object list
*
* @param serial serial device
* @return rt_err_t error code
*
* @ingroup Serial_v2
*/
rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial);
/*! @}*/
#endif

View File

@@ -0,0 +1,633 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-11-23 Bernard Add extern "C"
* 2020-06-13 armink fix the 3 wires issue
* 2022-09-01 liYony fix api rt_spi_sendrecv16 about MSB and LSB bug
*/
#ifndef __DEV_SPI_H__
#define __DEV_SPI_H__
#include <stdlib.h>
#include <rtthread.h>
#include <drivers/dev_pin.h>
#include <drivers/core/driver.h>
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup SPI SPI
*
* @brief SPI driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include <rtdevice.h>
*
* #define W25Q_SPI_DEVICE_NAME "qspi10"
*
* static void spi_w25q_sample(int argc, char *argv[])
* {
* struct rt_spi_device *spi_dev_w25q;
* char name[RT_NAME_MAX];
* rt_uint8_t w25x_read_id = 0x90;
* rt_uint8_t id[5] = {0};
*
* if (argc == 2)
* {
* rt_strncpy(name, argv[1], RT_NAME_MAX);
* }
* else
* {
* rt_strncpy(name, W25Q_SPI_DEVICE_NAME, RT_NAME_MAX);
* }
*
 * // find the spi device and obtain the device handle
* spi_dev_w25q = (struct rt_spi_device *)rt_device_find(name);
* if (!spi_dev_w25q)
* {
* rt_kprintf("spi sample run failed! can't find %s device!\n", name);
* }
* else
* {
 * // method 1: use rt_spi_send_then_recv() to send the command and read the ID
* rt_spi_send_then_recv(spi_dev_w25q, &w25x_read_id, 1, id, 5);
* rt_kprintf("use rt_spi_send_then_recv() read w25q ID is:%x%x\n", id[3], id[4]);
*
 * // method 2: use rt_spi_transfer_message() to send the command and read the ID
* struct rt_spi_message msg1, msg2;
*
* msg1.send_buf = &w25x_read_id;
* msg1.recv_buf = RT_NULL;
* msg1.length = 1;
* msg1.cs_take = 1;
* msg1.cs_release = 0;
* msg1.next = &msg2;
*
* msg2.send_buf = RT_NULL;
* msg2.recv_buf = id;
* msg2.length = 5;
* msg2.cs_take = 0;
* msg2.cs_release = 1;
* msg2.next = RT_NULL;
*
* rt_spi_transfer_message(spi_dev_w25q, &msg1);
* rt_kprintf("use rt_spi_transfer_message() read w25q ID is:%x%x\n", id[3], id[4]);
*
* }
* }
 * // export to the msh command list
* MSH_CMD_EXPORT(spi_w25q_sample, spi w25q sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup SPI
* @{
*/
#ifdef __cplusplus
extern "C"{
#endif
/**
* At CPOL=0 the base value of the clock is zero
* - For CPHA=0, data are captured on the clock's rising edge (low->high transition)
* and data are propagated on a falling edge (high->low clock transition).
* - For CPHA=1, data are captured on the clock's falling edge and data are
* propagated on a rising edge.
* At CPOL=1 the base value of the clock is one (inversion of CPOL=0)
* - For CPHA=0, data are captured on clock's falling edge and data are propagated
* on a rising edge.
* - For CPHA=1, data are captured on clock's rising edge and data are propagated
* on a falling edge.
*/
#define RT_SPI_CPHA (1<<0) /*!< bit[0]:CPHA, clock phase */
#define RT_SPI_CPOL (1<<1) /*!< bit[1]:CPOL, clock polarity */
#define RT_SPI_LSB (0<<2) /*!< bit[2]: 0-LSB */
#define RT_SPI_MSB (1<<2) /*!< bit[2]: 1-MSB */
#define RT_SPI_MASTER (0<<3) /*!< SPI master device */
#define RT_SPI_SLAVE (1<<3) /*!< SPI slave device */
#define RT_SPI_CS_HIGH (1<<4) /*!< Chipselect active high */
#define RT_SPI_NO_CS (1<<5) /*!< No chipselect */
#define RT_SPI_3WIRE (1<<6) /*!< SI/SO pin shared */
#define RT_SPI_READY (1<<7) /*!< Slave pulls low to pause */
#define RT_SPI_MODE_MASK (RT_SPI_CPHA | RT_SPI_CPOL | RT_SPI_MSB | RT_SPI_SLAVE | RT_SPI_CS_HIGH | RT_SPI_NO_CS | RT_SPI_3WIRE | RT_SPI_READY)
#define RT_SPI_MODE_0 (0 | 0) /*!< CPOL = 0, CPHA = 0 */
#define RT_SPI_MODE_1 (0 | RT_SPI_CPHA) /*!< CPOL = 0, CPHA = 1 */
#define RT_SPI_MODE_2 (RT_SPI_CPOL | 0) /*!< CPOL = 1, CPHA = 0 */
#define RT_SPI_MODE_3 (RT_SPI_CPOL | RT_SPI_CPHA) /*!< CPOL = 1, CPHA = 1 */
#define RT_SPI_BUS_MODE_SPI (1<<0)
#define RT_SPI_BUS_MODE_QSPI (1<<1)
/**
* @brief SPI message structure
*/
struct rt_spi_message
{
const void *send_buf;
void *recv_buf;
rt_size_t length;
struct rt_spi_message *next;
unsigned cs_take : 1;
unsigned cs_release : 1;
};
/**
* @brief SPI configuration structure
*/
struct rt_spi_configuration
{
rt_uint8_t mode;
rt_uint8_t data_width;
#ifdef RT_USING_DM
rt_uint8_t data_width_tx;
rt_uint8_t data_width_rx;
#else
rt_uint16_t reserved;
#endif
rt_uint32_t max_hz;
};
struct rt_spi_ops;
/**
* @brief SPI bus structure
*/
struct rt_spi_bus
{
struct rt_device parent;
rt_uint8_t mode;
const struct rt_spi_ops *ops;
#ifdef RT_USING_DM
rt_base_t *pins;
rt_bool_t slave;
int num_chipselect;
#endif /* RT_USING_DM */
struct rt_mutex lock;
struct rt_spi_device *owner;
};
/**
* @brief SPI operators
*/
struct rt_spi_ops
{
rt_err_t (*configure)(struct rt_spi_device *device, struct rt_spi_configuration *configuration);
rt_ssize_t (*xfer)(struct rt_spi_device *device, struct rt_spi_message *message);
};
#ifdef RT_USING_DM
/**
* @brief SPI delay info
*/
struct rt_spi_delay
{
#define RT_SPI_DELAY_UNIT_USECS 0
#define RT_SPI_DELAY_UNIT_NSECS 1
#define RT_SPI_DELAY_UNIT_SCK 2
rt_uint16_t value;
rt_uint8_t unit;
};
#endif /* RT_USING_DM */
/**
 * @brief SPI device attached to a virtual bus; every device must be connected to a virtual bus
*/
struct rt_spi_device
{
struct rt_device parent;
struct rt_spi_bus *bus;
#ifdef RT_USING_DM
const char *name;
const struct rt_spi_device_id *id;
const struct rt_ofw_node_id *ofw_id;
rt_uint8_t chip_select;
struct rt_spi_delay cs_setup;
struct rt_spi_delay cs_hold;
struct rt_spi_delay cs_inactive;
#endif
struct rt_spi_configuration config;
rt_base_t cs_pin;
void *user_data;
};
/**
* @brief QSPI message structure
*/
struct rt_qspi_message
{
struct rt_spi_message parent;
/* instruction stage */
struct
{
rt_uint8_t content;
rt_uint8_t qspi_lines;
} instruction;
/* address and alternate_bytes stage */
struct
{
rt_uint32_t content;
rt_uint8_t size;
rt_uint8_t qspi_lines;
} address, alternate_bytes;
/* dummy_cycles stage */
rt_uint32_t dummy_cycles;
/* number of lines in qspi data stage, the other configuration items are in parent */
rt_uint8_t qspi_data_lines;
};
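/**
 * A hedged sketch of filling this message for a plain single-line read; the
 * 0x03 opcode, the 24-bit address and the buffer length follow common NOR
 * flash conventions and are only illustrative:
 * @code {.c}
 * static void demo_qspi_read(struct rt_qspi_device *qspi_dev)
 * {
 *     rt_uint8_t buf[16];
 *     struct rt_qspi_message msg = {0};
 *
 *     msg.instruction.content    = 0x03;   // read command on a single line
 *     msg.instruction.qspi_lines = 1;
 *     msg.address.content        = 0;
 *     msg.address.size           = 24;     // address width (24 for a 3-byte address)
 *     msg.address.qspi_lines     = 1;
 *     msg.qspi_data_lines        = 1;
 *     msg.parent.recv_buf        = buf;
 *     msg.parent.length          = sizeof(buf);
 *     msg.parent.cs_take         = 1;
 *     msg.parent.cs_release      = 1;
 *
 *     rt_qspi_transfer_message(qspi_dev, &msg);
 * }
 * @endcode
 */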
/**
* @brief QSPI configuration structure
*/
struct rt_qspi_configuration
{
struct rt_spi_configuration parent;
/* The size of medium */
rt_uint32_t medium_size;
/* double data rate mode */
rt_uint8_t ddr_mode;
    /* the maximum data-line width supported by the QSPI bus, such as 1, 2 or 4 */
    rt_uint8_t qspi_dl_width;
};
/**
* @brief QSPI operators
*/
struct rt_qspi_device
{
struct rt_spi_device parent;
struct rt_qspi_configuration config;
void (*enter_qspi_mode)(struct rt_qspi_device *device);
void (*exit_qspi_mode)(struct rt_qspi_device *device);
};
#define SPI_DEVICE(dev) ((struct rt_spi_device *)(dev))
#ifdef RT_USING_DM
struct rt_spi_device_id
{
char name[20];
void *data;
};
struct rt_spi_driver
{
struct rt_driver parent;
const struct rt_spi_device_id *ids;
const struct rt_ofw_node_id *ofw_ids;
rt_err_t (*probe)(struct rt_spi_device *device);
rt_err_t (*remove)(struct rt_spi_device *device);
rt_err_t (*shutdown)(struct rt_spi_device *device);
};
rt_err_t rt_spi_driver_register(struct rt_spi_driver *driver);
rt_err_t rt_spi_device_register(struct rt_spi_device *device);
#define RT_SPI_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, spi, BUILIN)
#endif /* RT_USING_DM */
/**
* @brief register a SPI bus
*
* @param bus the SPI bus
* @param name the name of SPI bus
* @param ops the operations of SPI bus
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_bus_register(struct rt_spi_bus *bus,
const char *name,
const struct rt_spi_ops *ops);
/**
* @brief attach a device on SPI bus
*
* @param device the SPI device
* @param name the name of SPI device
* @param bus_name the name of SPI bus
* @param user_data the user data of SPI device
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_bus_attach_device(struct rt_spi_device *device,
const char *name,
const char *bus_name,
void *user_data);
/**
* @brief attach a device on SPI bus with CS pin
*
* @param device the SPI device
* @param name the name of SPI device
* @param bus_name the name of SPI bus
* @param cs_pin the CS pin of SPI device
* @param user_data the user data of SPI device
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_bus_attach_device_cspin(struct rt_spi_device *device,
const char *name,
const char *bus_name,
rt_base_t cs_pin,
void *user_data);
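/**
 * A minimal attach sketch; "spi10", "spi1" and the pin number are
 * board-specific placeholders:
 * @code {.c}
 * static struct rt_spi_device demo_spi_dev;
 *
 * static int demo_spi_attach(void)
 * {
 *     rt_base_t cs_pin = 27;   // hypothetical GPIO number used as chip select
 *
 *     return rt_spi_bus_attach_device_cspin(&demo_spi_dev, "spi10", "spi1", cs_pin, RT_NULL);
 * }
 * @endcode
 */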
/**
* @brief Reconfigure the SPI bus for the specified device.
*
* @param device: Pointer to the SPI device attached to the SPI bus.
* @retval RT_EOK if the SPI device was successfully released and the bus was configured.
* RT_EBUSY if the SPI bus is currently in use; the new configuration will take effect once the device releases the bus.
* Other return values indicate failure to configure the SPI bus due to various reasons.
* @note If the configuration of the SPI device has been updated and requires bus re-initialization,
* call this function directly. This function will reconfigure the SPI bus for the specified device.
* If this is the first time to initialize the SPI device, please call rt_spi_configure or rt_qspi_configure.
* This function is used to reconfigure the SPI bus when the SPI device is already in use.
* For further details, refer to:
* https://github.com/RT-Thread/rt-thread/pull/8528
*/
rt_err_t rt_spi_bus_configure(struct rt_spi_device *device);
/**
* @brief This function takes SPI bus.
*
* @param device the SPI device attached to SPI bus
*
 * @return RT_EOK if the SPI bus is taken successfully; other values if taking the bus failed.
*/
rt_err_t rt_spi_take_bus(struct rt_spi_device *device);
/**
* @brief This function releases SPI bus.
*
* @param device the SPI device attached to SPI bus
*
* @return RT_EOK on release SPI bus successfully.
*/
rt_err_t rt_spi_release_bus(struct rt_spi_device *device);
/**
 * @brief This function takes the SPI device (asserts the CS of the SPI device).
*
* @param device the SPI device attached to SPI bus
*
 * @return RT_EOK if the SPI device is taken successfully; other values if taking it failed.
*/
rt_err_t rt_spi_take(struct rt_spi_device *device);
/**
* @brief This function releases SPI device (releases CS of SPI device).
*
* @param device the SPI device attached to SPI bus
*
* @return RT_EOK on release SPI device successfully.
*/
rt_err_t rt_spi_release(struct rt_spi_device *device);
/**
* @brief This function can set configuration on SPI device.
*
* @param device: the SPI device attached to SPI bus
* @param cfg: the configuration pointer.
*
 * @retval RT_EOK if the configuration is applied successfully.
 *         RT_EBUSY is not an error; the configuration will take effect once the device owns the bus.
 *         Other values indicate that configuring the SPI bus failed.
*/
rt_err_t rt_spi_configure(struct rt_spi_device *device,
struct rt_spi_configuration *cfg);
/**
* @brief This function can send data then receive data from SPI device.
*
* @param device the SPI device attached to SPI bus
* @param send_buf the buffer to be transmitted to SPI device.
* @param send_length the number of data to be transmitted.
 * @param recv_buf the buffer to store data received from the SPI device.
 * @param recv_length the number of data to be received.
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_send_then_recv(struct rt_spi_device *device,
const void *send_buf,
rt_size_t send_length,
void *recv_buf,
rt_size_t recv_length);
/**
* @brief This function can send data then send data from SPI device.
*
* @param device the SPI device attached to SPI bus
* @param send_buf1 the buffer to be transmitted to SPI device.
* @param send_length1 the number of data to be transmitted.
* @param send_buf2 the buffer to be transmitted to SPI device.
* @param send_length2 the number of data to be transmitted.
*
* @return the status of transmit.
*/
rt_err_t rt_spi_send_then_send(struct rt_spi_device *device,
const void *send_buf1,
rt_size_t send_length1,
const void *send_buf2,
rt_size_t send_length2);
/**
* @brief This function transmits data to SPI device.
*
* @param device the SPI device attached to SPI bus
* @param send_buf the buffer to be transmitted to SPI device.
* @param recv_buf the buffer to save received data from SPI device.
* @param length the length of transmitted data.
*
 * @return the actual length of data transmitted.
*/
rt_ssize_t rt_spi_transfer(struct rt_spi_device *device,
const void *send_buf,
void *recv_buf,
rt_size_t length);
/**
 * @brief The SPI device sends and receives 8 bits of data
*
* @param device the SPI device attached to SPI bus
 * @param senddata the 8-bit data to send
 * @param recvdata pointer used to store the received 8-bit data
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_sendrecv8(struct rt_spi_device *device,
rt_uint8_t senddata,
rt_uint8_t *recvdata);
/**
 * @brief The SPI device sends and receives 16 bits of data
*
* @param device the SPI device attached to SPI bus
 * @param senddata the 16-bit data to send
 * @param recvdata pointer used to store the received 16-bit data
*
* @return rt_err_t error code
*/
rt_err_t rt_spi_sendrecv16(struct rt_spi_device *device,
rt_uint16_t senddata,
rt_uint16_t *recvdata);
/**
* @brief This function transfers a message list to the SPI device.
*
* @param device the SPI device attached to SPI bus
* @param message the message list to be transmitted to SPI device
*
 * @return RT_NULL if the message list is transmitted successfully;
 *         otherwise, the SPI message that failed to transmit.
*/
struct rt_spi_message *rt_spi_transfer_message(struct rt_spi_device *device,
struct rt_spi_message *message);
/**
* @brief This function receives data from SPI device.
*
* @param device the SPI device attached to SPI bus
 * @param recv_buf the buffer to store data received from the SPI device.
 * @param length the number of data to be received.
 *
 * @return the actual length of data received.
*/
rt_inline rt_size_t rt_spi_recv(struct rt_spi_device *device,
void *recv_buf,
rt_size_t length)
{
return rt_spi_transfer(device, RT_NULL, recv_buf, length);
}
/**
* @brief This function sends data to SPI device.
*
* @param device the SPI device attached to SPI bus
* @param send_buf the buffer to be transmitted to SPI device.
* @param length the number of data to be transmitted.
*
 * @return the actual length of data sent.
*/
rt_inline rt_size_t rt_spi_send(struct rt_spi_device *device,
const void *send_buf,
rt_size_t length)
{
return rt_spi_transfer(device, send_buf, RT_NULL, length);
}
/**
* @brief This function appends a message to the SPI message list.
*
* @param list the SPI message list header.
* @param message the message pointer to be appended to the message list.
*/
rt_inline void rt_spi_message_append(struct rt_spi_message *list,
struct rt_spi_message *message)
{
RT_ASSERT(list != RT_NULL);
if (message == RT_NULL)
return; /* not append */
while (list->next != RT_NULL)
{
list = list->next;
}
list->next = message;
message->next = RT_NULL;
}
/**
* @brief This function can set configuration on QSPI device.
*
* @param device the QSPI device attached to QSPI bus.
* @param cfg the configuration pointer.
*
 * @return rt_err_t error code
*/
rt_err_t rt_qspi_configure(struct rt_qspi_device *device, struct rt_qspi_configuration *cfg);
/**
* @brief This function can register a SPI bus for QSPI mode.
*
* @param bus the SPI bus for QSPI mode.
 * @param name the name of the SPI bus.
 * @param ops the operations of the SPI bus.
 *
 * @return rt_err_t error code
*/
rt_err_t rt_qspi_bus_register(struct rt_spi_bus *bus, const char *name, const struct rt_spi_ops *ops);
/**
* @brief This function transmits data to QSPI device.
*
* @param device the QSPI device attached to QSPI bus.
* @param message the message pointer.
*
 * @return the actual length of data transmitted.
*/
rt_size_t rt_qspi_transfer_message(struct rt_qspi_device *device, struct rt_qspi_message *message);
/**
* @brief This function can send data then receive data from QSPI device
*
* @param device the QSPI device attached to QSPI bus.
* @param send_buf the buffer to be transmitted to QSPI device.
* @param send_length the number of data to be transmitted.
 * @param recv_buf the buffer to store data received from the QSPI device.
 * @param recv_length the number of data to be received.
*
* @return the status of transmit.
*/
rt_err_t rt_qspi_send_then_recv(struct rt_qspi_device *device, const void *send_buf, rt_size_t send_length, void *recv_buf, rt_size_t recv_length);
/**
* @brief This function can send data to QSPI device
*
* @param device the QSPI device attached to QSPI bus.
* @param send_buf the buffer to be transmitted to QSPI device.
* @param length the number of data to be transmitted.
*
* @return the status of transmit.
*/
rt_err_t rt_qspi_send(struct rt_qspi_device *device, const void *send_buf, rt_size_t length);
#ifdef __cplusplus
}
#endif
/*! @}*/
#endif

View File

@@ -0,0 +1,248 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-05-20 tyustli the first version
*/
#ifndef __DEV_TOUCH_H__
#define __DEV_TOUCH_H__
#include <rtthread.h>
#include "dev_pin.h"
/**
* @addtogroup Drivers RTTHREAD Driver
* @defgroup Touch Touch
*
* @brief Touch driver api
*
* <b>Example</b>
* @code {.c}
* #include <rtthread.h>
* #include "rtdevice.h"
*
* static rt_thread_t gt9147_thread = RT_NULL;
* static rt_sem_t gt9147_sem = RT_NULL;
* static rt_device_t dev = RT_NULL;
* static struct rt_touch_data *read_data;
*
 * // entry function of the data-reading thread
* static void gt9147_entry(void *parameter)
* {
* struct rt_touch_data *read_data;
* read_data = (struct rt_touch_data *)rt_malloc(sizeof(struct rt_touch_data) * 5);
*
* while (1)
* {
 * // take the semaphore
* rt_sem_take(gt9147_sem, RT_WAITING_FOREVER);
 * // read touch information for five points
* if (rt_device_read(dev, 0, read_data, 5) == 5)
* {
* for (rt_uint8_t i = 0; i < 5; i++)
* {
* if (read_data[i].event == RT_TOUCH_EVENT_DOWN || read_data[i].event == RT_TOUCH_EVENT_MOVE)
* {
* rt_kprintf("%d %d %d %d %d\n",
* read_data[i].track_id,
* read_data[i].x_coordinate,
* read_data[i].y_coordinate,
* read_data[i].timestamp,
* read_data[i].width);
* }
* }
* }
 * // enable the interrupt
* rt_device_control(dev, RT_TOUCH_CTRL_ENABLE_INT, RT_NULL);
* }
* }
*
 * // receive callback function
* static rt_err_t rx_callback(rt_device_t dev, rt_size_t size)
* {
 * // disable the interrupt
* rt_device_control(dev, RT_TOUCH_CTRL_DISABLE_INT, RT_NULL);
 * // release the semaphore
* rt_sem_release(gt9147_sem);
* return 0;
* }
*
* static int gt9147_sample(void)
* {
 * // find the touch device
* dev = rt_device_find("touch");
*
* if (dev == RT_NULL)
* {
* rt_kprintf("can't find device:%s\n", "touch");
* return -1;
* }
 * // open the device in interrupt mode
* if (rt_device_open(dev, RT_DEVICE_FLAG_INT_RX) != RT_EOK)
* {
* rt_kprintf("open device failed!");
* return -1;
* }
 * // set the receive callback
* rt_device_set_rx_indicate(dev, rx_callback);
 * // create the semaphore
* gt9147_sem = rt_sem_create("dsem", 0, RT_IPC_FLAG_PRIO);
*
* if (gt9147_sem == RT_NULL)
* {
* rt_kprintf("create dynamic semaphore failed.\n");
* return -1;
* }
 * // create the data-reading thread
* gt9147_thread = rt_thread_create("thread1",
* gt9147_entry,
* RT_NULL,
* THREAD_STACK_SIZE,
* THREAD_PRIORITY,
* THREAD_TIMESLICE);
 * // start the thread
* if (gt9147_thread != RT_NULL)
* rt_thread_startup(gt9147_thread);
*
* return 0;
* }
* MSH_CMD_EXPORT(gt9147_sample, gt9147 sample);
* @endcode
*
* @ingroup Drivers
*/
/*!
* @addtogroup Touch
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifdef RT_USING_RTC
#define rt_touch_get_ts() time(RT_NULL) /* API for the touch to get the timestamp */
#else
#define rt_touch_get_ts() rt_tick_get() /* API for the touch to get the timestamp */
#endif
/* Touch vendor types */
#define RT_TOUCH_VENDOR_UNKNOWN (0) /* unknown */
#define RT_TOUCH_VENDOR_GT (1) /* GTxx series */
#define RT_TOUCH_VENDOR_FT (2) /* FTxx series */
/* Touch ic type*/
#define RT_TOUCH_TYPE_NONE (0) /* touch ic none */
#define RT_TOUCH_TYPE_CAPACITANCE (1) /* capacitance ic */
#define RT_TOUCH_TYPE_RESISTANCE (2) /* resistance ic */
/* Touch control cmd types */
#define RT_TOUCH_CTRL_GET_ID (RT_DEVICE_CTRL_BASE(Touch) + 0) /* Get device id */
#define RT_TOUCH_CTRL_GET_INFO (RT_DEVICE_CTRL_BASE(Touch) + 1) /* Get touch info */
#define RT_TOUCH_CTRL_SET_MODE (RT_DEVICE_CTRL_BASE(Touch) + 2) /* Set touch's work mode. ex. RT_TOUCH_MODE_POLLING,RT_TOUCH_MODE_INT */
#define RT_TOUCH_CTRL_SET_X_RANGE (RT_DEVICE_CTRL_BASE(Touch) + 3) /* Set x coordinate range */
#define RT_TOUCH_CTRL_SET_Y_RANGE (RT_DEVICE_CTRL_BASE(Touch) + 4) /* Set y coordinate range */
#define RT_TOUCH_CTRL_SET_X_TO_Y (RT_DEVICE_CTRL_BASE(Touch) + 5) /* Set X Y coordinate exchange */
#define RT_TOUCH_CTRL_DISABLE_INT (RT_DEVICE_CTRL_BASE(Touch) + 6) /* Disable interrupt */
#define RT_TOUCH_CTRL_ENABLE_INT (RT_DEVICE_CTRL_BASE(Touch) + 7) /* Enable interrupt */
#define RT_TOUCH_CTRL_POWER_ON (RT_DEVICE_CTRL_BASE(Touch) + 8) /* Touch Power On */
#define RT_TOUCH_CTRL_POWER_OFF (RT_DEVICE_CTRL_BASE(Touch) + 9) /* Touch Power Off */
#define RT_TOUCH_CTRL_GET_STATUS (RT_DEVICE_CTRL_BASE(Touch) + 10) /* Get Touch Power Status */
/* Touch event */
#define RT_TOUCH_EVENT_NONE (0) /* Touch none */
#define RT_TOUCH_EVENT_UP (1) /* Touch up event */
#define RT_TOUCH_EVENT_DOWN (2) /* Touch down event */
#define RT_TOUCH_EVENT_MOVE (3) /* Touch move event */
/**
* @brief Touch information
*/
struct rt_touch_info
{
rt_uint8_t type; /* The touch type */
    rt_uint8_t vendor;       /* Vendor of the touch IC */
    rt_uint8_t point_num;    /* Supported number of touch points */
rt_int32_t range_x; /* X coordinate range */
rt_int32_t range_y; /* Y coordinate range */
};
/**
* @brief Touch configuration
*/
struct rt_touch_config
{
#ifdef RT_TOUCH_PIN_IRQ
    struct rt_device_pin_mode irq_pin; /* Interrupt pin; used to notify that data is ready to be read */
#endif
char *dev_name; /* The name of the communication device */
void *user_data;
};
typedef struct rt_touch_device *rt_touch_t;
/**
* @brief Touch device
*/
struct rt_touch_device
{
struct rt_device parent; /* The standard device */
struct rt_touch_info info; /* The touch info data */
struct rt_touch_config config; /* The touch config data */
const struct rt_touch_ops *ops; /* The touch ops */
rt_err_t (*irq_handle)(rt_touch_t touch); /* Called when an interrupt is generated, registered by the driver */
};
/**
* @brief Touch data
*/
struct rt_touch_data
{
rt_uint8_t event; /* The touch event of the data */
rt_uint8_t track_id; /* Track id of point */
    rt_uint8_t width;            /* Width of the touch point */
    rt_uint16_t x_coordinate;    /* X coordinate of the point */
    rt_uint16_t y_coordinate;    /* Y coordinate of the point */
rt_tick_t timestamp; /* The timestamp when the data was received */
};
/**
* @brief Touch device operations
*/
struct rt_touch_ops
{
rt_size_t (*touch_readpoint)(struct rt_touch_device *touch, void *buf, rt_size_t touch_num);
rt_err_t (*touch_control)(struct rt_touch_device *touch, int cmd, void *arg);
};
/**
* @brief register a touch device
* @param touch the touch device
* @param name the name of touch device
* @param flag the flag of touch device
* @param data the user data of touch device
* @return rt_err_t error code
*/
int rt_hw_touch_register(rt_touch_t touch,
const char *name,
rt_uint32_t flag,
void *data);
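/**
 * A skeleton registration sketch assembled from the structures above; the
 * info values and the read logic are placeholders for a real controller:
 * @code {.c}
 * static struct rt_touch_device demo_touch;
 *
 * static rt_size_t demo_readpoint(struct rt_touch_device *touch, void *buf, rt_size_t touch_num)
 * {
 *     // fill buf with up to touch_num struct rt_touch_data entries read from the IC
 *     return 0;
 * }
 *
 * static rt_err_t demo_control(struct rt_touch_device *touch, int cmd, void *arg)
 * {
 *     return RT_EOK;
 * }
 *
 * static const struct rt_touch_ops demo_ops = { demo_readpoint, demo_control };
 *
 * static int demo_touch_init(void)
 * {
 *     demo_touch.info.type      = RT_TOUCH_TYPE_CAPACITANCE;
 *     demo_touch.info.point_num = 5;
 *     demo_touch.ops            = &demo_ops;
 *     return rt_hw_touch_register(&demo_touch, "touch", RT_DEVICE_FLAG_INT_RX, RT_NULL);
 * }
 * @endcode
 */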
/**
* @brief Touch irq handle
* @param touch the touch device
*
 * @note If you do not use the pin device, you must call this function in your touch IRQ callback
*/
void rt_hw_touch_isr(rt_touch_t touch);
#ifdef __cplusplus
}
#endif
/*! @}*/
#endif /* __DEV_TOUCH_H__ */

View File

@@ -0,0 +1,42 @@
/*
* COPYRIGHT (C) 2011-2023, Real-Thread Information Technology Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-12 heyuanjie87 first version.
*/
#ifndef __DEV_WATCHDOG_H__
#define __DEV_WATCHDOG_H__
#include <rtthread.h>
#define RT_DEVICE_CTRL_WDT_GET_TIMEOUT (RT_DEVICE_CTRL_BASE(WDT) + 1) /* get timeout(in seconds) */
#define RT_DEVICE_CTRL_WDT_SET_TIMEOUT (RT_DEVICE_CTRL_BASE(WDT) + 2) /* set timeout(in seconds) */
#define RT_DEVICE_CTRL_WDT_GET_TIMELEFT (RT_DEVICE_CTRL_BASE(WDT) + 3) /* get the left time before reboot(in seconds) */
#define RT_DEVICE_CTRL_WDT_KEEPALIVE (RT_DEVICE_CTRL_BASE(WDT) + 4) /* refresh watchdog */
#define RT_DEVICE_CTRL_WDT_START (RT_DEVICE_CTRL_BASE(WDT) + 5) /* start watchdog */
#define RT_DEVICE_CTRL_WDT_STOP (RT_DEVICE_CTRL_BASE(WDT) + 6) /* stop watchdog */
struct rt_watchdog_ops;
struct rt_watchdog_device
{
struct rt_device parent;
const struct rt_watchdog_ops *ops;
};
typedef struct rt_watchdog_device rt_watchdog_t;
struct rt_watchdog_ops
{
rt_err_t (*init)(rt_watchdog_t *wdt);
rt_err_t (*control)(rt_watchdog_t *wdt, int cmd, void *arg);
};
rt_err_t rt_hw_watchdog_register(rt_watchdog_t *wdt,
const char *name,
rt_uint32_t flag,
void *data);
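/*
 * A typical application-side sketch using the control commands above; "wdt"
 * is a common BSP device name but is board-dependent:
 *
 *   static int demo_wdt_setup(void)
 *   {
 *       rt_uint32_t timeout = 5;   // seconds
 *       rt_device_t wdg = rt_device_find("wdt");
 *
 *       if (!wdg)
 *           return -RT_ERROR;
 *
 *       rt_device_init(wdg);
 *       rt_device_control(wdg, RT_DEVICE_CTRL_WDT_SET_TIMEOUT, &timeout);
 *       rt_device_control(wdg, RT_DEVICE_CTRL_WDT_START, RT_NULL);
 *       // feed periodically (e.g. from the idle hook) with RT_DEVICE_CTRL_WDT_KEEPALIVE
 *       return RT_EOK;
 *   }
 */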
#endif /* __DEV_WATCHDOG_H__ */

View File

@@ -0,0 +1,234 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#ifndef __DMA_H__
#define __DMA_H__
#include <rtthread.h>
#include <drivers/ofw.h>
#include <drivers/misc.h>
#include <drivers/core/dm.h>
#include <mmu.h>
#include <mm_page.h>
#include <bitmap.h>
struct rt_dma_chan;
struct rt_dma_controller_ops;
enum rt_dma_transfer_direction
{
RT_DMA_MEM_TO_MEM,
RT_DMA_MEM_TO_DEV,
RT_DMA_DEV_TO_MEM,
RT_DMA_DEV_TO_DEV,
RT_DMA_DIR_MAX,
};
enum rt_dma_slave_buswidth
{
RT_DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
RT_DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
RT_DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
RT_DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
RT_DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
RT_DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
RT_DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
RT_DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
RT_DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
RT_DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX,
};
struct rt_dma_slave_config
{
enum rt_dma_transfer_direction direction;
enum rt_dma_slave_buswidth src_addr_width;
enum rt_dma_slave_buswidth dst_addr_width;
rt_ubase_t src_addr;
rt_ubase_t dst_addr;
rt_uint32_t src_maxburst;
rt_uint32_t dst_maxburst;
rt_uint32_t src_port_window_size;
rt_uint32_t dst_port_window_size;
};
struct rt_dma_slave_transfer
{
rt_ubase_t src_addr;
rt_ubase_t dst_addr;
void *buffer;
rt_ubase_t dma_handle;
rt_size_t buffer_len;
rt_size_t period_len;
};
struct rt_dma_controller
{
rt_list_t list;
struct rt_device *dev;
RT_BITMAP_DECLARE(dir_cap, RT_DMA_DIR_MAX);
const struct rt_dma_controller_ops *ops;
rt_list_t channels_nodes;
struct rt_mutex mutex;
};
struct rt_dma_controller_ops
{
struct rt_dma_chan *(*request_chan)(struct rt_dma_controller *ctrl,
struct rt_device *slave, void *fw_data);
rt_err_t (*release_chan)(struct rt_dma_chan *chan);
rt_err_t (*start)(struct rt_dma_chan *chan);
rt_err_t (*stop)(struct rt_dma_chan *chan);
rt_err_t (*config)(struct rt_dma_chan *chan, struct rt_dma_slave_config *conf);
rt_err_t (*prep_memcpy)(struct rt_dma_chan *chan,
rt_ubase_t dma_addr_src, rt_ubase_t dma_addr_dst, rt_size_t len);
rt_err_t (*prep_cyclic)(struct rt_dma_chan *chan,
rt_ubase_t dma_buf_addr, rt_size_t buf_len, rt_size_t period_len,
enum rt_dma_transfer_direction dir);
rt_err_t (*prep_single)(struct rt_dma_chan *chan,
rt_ubase_t dma_buf_addr, rt_size_t buf_len,
enum rt_dma_transfer_direction dir);
};
struct rt_dma_chan
{
const char *name;
struct rt_dma_controller *ctrl;
struct rt_device *slave;
rt_list_t list;
rt_err_t conf_err;
rt_err_t prep_err;
struct rt_dma_slave_config conf;
struct rt_dma_slave_transfer transfer;
void (*callback)(struct rt_dma_chan *chan, rt_size_t size);
void *priv;
};
struct rt_dma_pool
{
rt_region_t region;
rt_list_t list;
rt_ubase_t flags;
rt_bitmap_t *map;
rt_size_t bits;
rt_ubase_t start;
struct rt_device *dev;
};
struct rt_dma_map_ops
{
void *(*alloc)(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags);
void (*free)(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags);
rt_err_t (*sync_out_data)(struct rt_device *dev, void *data, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags);
rt_err_t (*sync_in_data)(struct rt_device *dev, void *out_data, rt_size_t size,
rt_ubase_t dma_handle, rt_ubase_t flags);
};
rt_inline void rt_dma_controller_add_direction(struct rt_dma_controller *ctrl,
enum rt_dma_transfer_direction dir)
{
RT_ASSERT(ctrl != RT_NULL);
RT_ASSERT(dir < RT_DMA_DIR_MAX);
rt_bitmap_set_bit(ctrl->dir_cap, dir);
}
rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl);
rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl);
rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan);
rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan);
rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
struct rt_dma_slave_config *conf);
rt_err_t rt_dma_chan_done(struct rt_dma_chan *chan, rt_size_t size);
rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer);
rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer);
rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
struct rt_dma_slave_transfer *transfer);
struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name);
rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan);
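/*
 * A hedged sketch of a memory-to-memory transfer through the channel API
 * above; "m2m" is a hypothetical channel name, and the DMA addresses are
 * assumed to come from rt_dma_alloc():
 *
 *   static void demo_dma_m2m(struct rt_device *dev, rt_ubase_t src, rt_ubase_t dst)
 *   {
 *       struct rt_dma_slave_config conf = {0};
 *       struct rt_dma_slave_transfer xfer = {0};
 *       struct rt_dma_chan *chan = rt_dma_chan_request(dev, "m2m");
 *
 *       conf.direction = RT_DMA_MEM_TO_MEM;
 *       rt_dma_chan_config(chan, &conf);
 *
 *       xfer.src_addr   = src;
 *       xfer.dst_addr   = dst;
 *       xfer.buffer_len = 4096;
 *       rt_dma_prep_memcpy(chan, &xfer);
 *       rt_dma_chan_start(chan);   // completion is reported through chan->callback
 *   }
 */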
#define RT_DMA_F_LINEAR RT_BIT(0)
#define RT_DMA_F_32BITS RT_BIT(1)
#define RT_DMA_F_NOCACHE RT_BIT(2)
#define RT_DMA_F_DEVICE RT_BIT(3)
#define RT_DMA_F_NOMAP RT_BIT(4)
#define RT_DMA_PAGE_SIZE ARCH_PAGE_SIZE
void *rt_dma_alloc(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags);
void rt_dma_free(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags);
rt_inline void *rt_dma_alloc_coherent(struct rt_device *dev, rt_size_t size,
rt_ubase_t *dma_handle)
{
return rt_dma_alloc(dev, size, dma_handle,
RT_DMA_F_NOCACHE | RT_DMA_F_LINEAR);
}
rt_inline void rt_dma_free_coherent(struct rt_device *dev, rt_size_t size,
void *cpu_addr, rt_ubase_t dma_handle)
{
rt_dma_free(dev, size, cpu_addr, dma_handle,
RT_DMA_F_NOCACHE | RT_DMA_F_LINEAR);
}
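/*
 * A short sketch of the coherent helpers above, e.g. for a descriptor ring
 * shared with a device ("dev" is assumed to be the owning rt_device):
 *
 *   rt_ubase_t dma_handle;
 *   void *ring = rt_dma_alloc_coherent(dev, 4096, &dma_handle);
 *
 *   // program dma_handle into the device, access "ring" from the CPU side
 *
 *   rt_dma_free_coherent(dev, 4096, ring, dma_handle);
 */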
rt_err_t rt_dma_sync_out_data(struct rt_device *dev, void *data, rt_size_t size,
rt_ubase_t *dma_handle, rt_ubase_t flags);
rt_err_t rt_dma_sync_in_data(struct rt_device *dev, void *out_data, rt_size_t size,
rt_ubase_t dma_handle, rt_ubase_t flags);
rt_inline rt_bool_t rt_dma_device_is_coherent(struct rt_device *dev)
{
return rt_dm_dev_prop_read_bool(dev, "dma-coherent");
}
rt_inline void rt_dma_device_set_ops(struct rt_device *dev,
const struct rt_dma_map_ops *ops)
{
dev->dma_ops = ops;
}
struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region);
rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
rt_size_t cma_size, rt_size_t coherent_pool_size);
#endif /* __DMA_H__ */

View File

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#ifndef __IIO_H__
#define __IIO_H__
void *rt_iio_channel_get_by_index(struct rt_device *dev, int index, int *out_channel);
void *rt_iio_channel_get_by_name(struct rt_device *dev, const char *name, int *out_channel);
#endif /* __IIO_H__ */

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#ifndef __LED_H__
#define __LED_H__
#include <rthw.h>
#include <rtdef.h>
struct rt_led_ops;
enum rt_led_state
{
RT_LED_S_OFF,
RT_LED_S_ON,
RT_LED_S_TOGGLE,
RT_LED_S_BLINK,
RT_LED_STATE_NR,
};
struct rt_led_device
{
struct rt_device parent;
const struct rt_led_ops *ops;
struct rt_spinlock spinlock;
void *sysdata;
void *priv;
};
struct rt_led_ops
{
rt_err_t (*set_state)(struct rt_led_device *led, enum rt_led_state state);
rt_err_t (*get_state)(struct rt_led_device *led, enum rt_led_state *out_state);
rt_err_t (*set_period)(struct rt_led_device *led, rt_uint32_t period_ms);
rt_err_t (*set_brightness)(struct rt_led_device *led, rt_uint32_t brightness);
};
rt_err_t rt_hw_led_register(struct rt_led_device *led);
rt_err_t rt_hw_led_unregister(struct rt_led_device *led);
rt_err_t rt_led_set_state(struct rt_led_device *led, enum rt_led_state state);
rt_err_t rt_led_get_state(struct rt_led_device *led, enum rt_led_state *out_state);
rt_err_t rt_led_set_period(struct rt_led_device *led, rt_uint32_t period_ms);
rt_err_t rt_led_set_brightness(struct rt_led_device *led, rt_uint32_t brightness);
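/*
 * A hedged driver-and-consumer sketch for the interfaces above; the actual
 * GPIO/PWM access in set_state is left as a placeholder:
 *
 *   static rt_err_t demo_led_set_state(struct rt_led_device *led, enum rt_led_state state)
 *   {
 *       // drive the LED pin or PWM channel according to "state"
 *       return RT_EOK;
 *   }
 *
 *   static const struct rt_led_ops demo_led_ops = { .set_state = demo_led_set_state };
 *   static struct rt_led_device demo_led = { .ops = &demo_led_ops };
 *
 *   // rt_hw_led_register(&demo_led);
 *   // rt_led_set_state(&demo_led, RT_LED_S_BLINK);
 */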
#endif /* __LED_H__ */

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#ifndef __MAILBOX_H__
#define __MAILBOX_H__
#include <rtdef.h>
#include <drivers/ofw.h>
struct rt_mbox_chan;
struct rt_mbox_client;
struct rt_mbox_controller_ops;
struct rt_mbox_controller
{
rt_list_t list;
struct rt_device *dev;
const struct rt_mbox_controller_ops *ops;
rt_size_t num_chans;
struct rt_mbox_chan *chans;
};
struct rt_mbox_controller_ops
{
rt_err_t (*request)(struct rt_mbox_chan *);
void (*release)(struct rt_mbox_chan *);
rt_err_t (*send)(struct rt_mbox_chan *, const void *data);
rt_bool_t (*peek)(struct rt_mbox_chan *);
int (*ofw_parse)(struct rt_mbox_controller *, struct rt_ofw_cell_args *);
};
struct rt_mbox_chan
{
struct rt_mbox_controller *ctrl;
struct rt_mbox_client *client;
void *data;
rt_bool_t complete;
struct rt_timer timer;
struct rt_spinlock lock;
void *priv;
};
struct rt_mbox_client
{
struct rt_device *dev;
void (*rx_callback)(struct rt_mbox_client *, void *data);
void (*tx_prepare)(struct rt_mbox_client *, const void *data);
void (*tx_done)(struct rt_mbox_client *, const void *data, rt_err_t err);
};
rt_err_t rt_mbox_controller_register(struct rt_mbox_controller *ctrl);
rt_err_t rt_mbox_controller_unregister(struct rt_mbox_controller *ctrl);
rt_err_t rt_mbox_send(struct rt_mbox_chan *chan, const void *data,
rt_uint32_t timeout_ms);
void rt_mbox_send_done(struct rt_mbox_chan *chan, rt_err_t err);
rt_bool_t rt_mbox_peek(struct rt_mbox_chan *chan);
rt_err_t rt_mbox_recv(struct rt_mbox_chan *chan, void *data);
struct rt_mbox_chan *rt_mbox_request_by_index(struct rt_mbox_client *client, int index);
struct rt_mbox_chan *rt_mbox_request_by_name(struct rt_mbox_client *client, char *name);
rt_err_t rt_mbox_release(struct rt_mbox_chan *chan);
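/*
 * A client-side sketch of the API above; the channel name "tx0" and the
 * payload layout are controller-specific placeholders:
 *
 *   static void demo_rx(struct rt_mbox_client *client, void *data)
 *   {
 *       // consume the message delivered by the controller
 *   }
 *
 *   static struct rt_mbox_client demo_client = { .rx_callback = demo_rx };
 *
 *   static void demo_send(void)
 *   {
 *       rt_uint32_t payload = 0x1234;
 *       struct rt_mbox_chan *chan = rt_mbox_request_by_name(&demo_client, "tx0");
 *
 *       rt_mbox_send(chan, &payload, 100);   // wait up to 100 ms for completion
 *   }
 */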
#endif /* __MAILBOX_H__ */

View File

@@ -12,6 +12,7 @@
#define __MISC_H__
#include <rtdef.h>
#include <cpuport.h>
#ifdef ARCH_CPU_64BIT
#define RT_BITS_PER_LONG 64
@@ -33,6 +34,15 @@
(((__x) - ((__d) / 2)) / (__d)); \
})
#define __KEY_PLACEHOLDER_1 0,
#define ____KEY_ENABLED(__ignored, val, ...) val
#define ___KEY_ENABLED(arg1_or_junk) ____KEY_ENABLED(arg1_or_junk 1, 0)
#define __KEY_ENABLED(value) ___KEY_ENABLED(__KEY_PLACEHOLDER_##value)
#define RT_KEY_ENABLED(key) __KEY_ENABLED(key)
#define RT_FIELD_PREP(mask, val) (((rt_uint64_t)(val) << (__rt_ffsl((mask)) - 1)) & (mask))
#define RT_FIELD_GET(mask, val) (((val) & (mask)) >> (__rt_ffsl((mask)) - 1))
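/* e.g. RT_FIELD_PREP(0x00f0, 3) == 0x0030 and RT_FIELD_GET(0x00f0, 0x0034) == 3 */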
#define RT_BIT(n) (1UL << (n))
#define RT_BIT_ULL(n) (1ULL << (n))
#define RT_BIT_MASK(nr) (1UL << ((nr) % RT_BITS_PER_LONG))
@@ -48,6 +58,19 @@
#define RT_ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define rt_offsetof(s, field) ((rt_size_t)&((s *)0)->field)
#define rt_err_ptr(err) ((void *)(rt_base_t)(err))
#define rt_ptr_err(ptr) ((rt_err_t)(rt_base_t)(ptr))
#define rt_is_err_value(ptr) ((rt_ubase_t)(void *)(ptr) >= (rt_ubase_t)-4095)
#define rt_is_err(ptr) rt_is_err_value(ptr)
#define rt_is_err_or_null(ptr) (!(ptr) || rt_is_err_value((rt_ubase_t)(ptr)))
#define rt_upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define rt_lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff))
#define rt_upper_16_bits(n) ((rt_uint16_t)((n) >> 16))
#define rt_lower_16_bits(n) ((rt_uint16_t)((n) & 0xffff))
#define rt_min(x, y) \
({ \
typeof(x) _x = (x); \
@@ -71,6 +94,13 @@
_x < _y ? _x: _y; \
})
#define rt_max_t(type, x, y) \
({ \
type _x = (x); \
type _y = (y); \
_x > _y ? _x: _y; \
})
#define rt_clamp(val, lo, hi) rt_min((typeof(val))rt_max(val, lo), hi)
#define rt_do_div(n, base) \
@@ -83,4 +113,34 @@
_rem; \
})
#define rt_abs(x) \
({ \
long ret; \
if (sizeof(x) == sizeof(long)) \
{ \
long __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
else \
{ \
int __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
})
#ifndef rt_ilog2
rt_inline int rt_ilog2(rt_ubase_t v)
{
int l = 0;
while ((1UL << l) < v)
{
l++;
}
return l;
}
#endif /* !rt_ilog2 */
#endif /* __MISC_H__ */

View File

@@ -218,7 +218,7 @@ struct rt_mmcsd_card {
struct rt_sdio_cccr cccr; /* common card info */
struct rt_sdio_cis cis; /* common tuple info */
struct rt_sdio_function *sdio_function[SDIO_MAX_FUNCTIONS + 1]; /* SDIO functions (devices) */
rt_list_t blk_devices; /* for block device list */
void *blk_dev;
struct rt_mmc_ext_csd ext_csd;
};

View File

@@ -134,6 +134,7 @@ struct rt_mmcsd_host
#define MMCSD_SUP_HS200_1V2 (1 << 10)
#define MMCSD_SUP_HS200 (MMCSD_SUP_HS200_1V2 | MMCSD_SUP_HS200_1V8) /* hs200 sdr */
#define MMCSD_SUP_NONREMOVABLE (1 << 11)
#define controller_is_removable(host) (!(host->flags & MMCSD_SUP_NONREMOVABLE))
#define MMCSD_SUP_HS400_1V8 (1 << 12)
#define MMCSD_SUP_HS400_1V2 (1 << 13)
#define MMCSD_SUP_HS400 (MMCSD_SUP_HS400_1V2 | MMCSD_SUP_HS400_1V8) /* hs400 ddr */

View File

@@ -0,0 +1,899 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#ifndef __NVME_H__
#define __NVME_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/blk.h>
#define NVME_RSVD(offset, bytes_size) rt_uint8_t __rsvd##offset[bytes_size]
enum
{
/*
* Generic Command Status:
*/
RT_NVME_SC_SUCCESS = 0x0,
RT_NVME_SC_INVALID_OPCODE = 0x1,
RT_NVME_SC_INVALID_FIELD = 0x2,
RT_NVME_SC_CMDID_CONFLICT = 0x3,
RT_NVME_SC_DATA_XFER_ERROR = 0x4,
RT_NVME_SC_POWER_LOSS = 0x5,
RT_NVME_SC_INTERNAL = 0x6,
RT_NVME_SC_ABORT_REQ = 0x7,
RT_NVME_SC_ABORT_QUEUE = 0x8,
RT_NVME_SC_FUSED_FAIL = 0x9,
RT_NVME_SC_FUSED_MISSING = 0xa,
RT_NVME_SC_INVALID_NS = 0xb,
RT_NVME_SC_CMD_SEQ_ERROR = 0xc,
RT_NVME_SC_SGL_INVALID_LAST = 0xd,
RT_NVME_SC_SGL_INVALID_COUNT = 0xe,
RT_NVME_SC_SGL_INVALID_DATA = 0xf,
RT_NVME_SC_SGL_INVALID_METADATA = 0x10,
RT_NVME_SC_SGL_INVALID_TYPE = 0x11,
RT_NVME_SC_CMB_INVALID_USE = 0x12,
RT_NVME_SC_PRP_INVALID_OFFSET = 0x13,
RT_NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
RT_NVME_SC_OP_DENIED = 0x15,
RT_NVME_SC_SGL_INVALID_OFFSET = 0x16,
RT_NVME_SC_RESERVED = 0x17,
RT_NVME_SC_HOST_ID_INCONSIST = 0x18,
RT_NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
RT_NVME_SC_KA_TIMEOUT_INVALID = 0x1a,
RT_NVME_SC_ABORTED_PREEMPT_ABORT = 0x1b,
RT_NVME_SC_SANITIZE_FAILED = 0x1c,
RT_NVME_SC_SANITIZE_IN_PROGRESS = 0x1d,
RT_NVME_SC_SGL_INVALID_GRANULARITY = 0x1e,
RT_NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1f,
RT_NVME_SC_NS_WRITE_PROTECTED = 0x20,
RT_NVME_SC_CMD_INTERRUPTED = 0x21,
RT_NVME_SC_TRANSIENT_TR_ERR = 0x22,
RT_NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
RT_NVME_SC_INVALID_IO_CMD_SET = 0x2c,
RT_NVME_SC_LBA_RANGE = 0x80,
RT_NVME_SC_CAP_EXCEEDED = 0x81,
RT_NVME_SC_NS_NOT_READY = 0x82,
RT_NVME_SC_RESERVATION_CONFLICT = 0x83,
RT_NVME_SC_FORMAT_IN_PROGRESS = 0x84,
/*
* Command Specific Status:
*/
RT_NVME_SC_CQ_INVALID = 0x100,
RT_NVME_SC_QID_INVALID = 0x101,
RT_NVME_SC_QUEUE_SIZE = 0x102,
RT_NVME_SC_ABORT_LIMIT = 0x103,
RT_NVME_SC_ABORT_MISSING = 0x104,
RT_NVME_SC_ASYNC_LIMIT = 0x105,
RT_NVME_SC_FIRMWARE_SLOT = 0x106,
RT_NVME_SC_FIRMWARE_IMAGE = 0x107,
RT_NVME_SC_INVALID_VECTOR = 0x108,
RT_NVME_SC_INVALID_LOG_PAGE = 0x109,
RT_NVME_SC_INVALID_FORMAT = 0x10a,
RT_NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
RT_NVME_SC_INVALID_QUEUE = 0x10c,
RT_NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
RT_NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
RT_NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
RT_NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
RT_NVME_SC_FW_NEEDS_RESET = 0x111,
RT_NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
RT_NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
RT_NVME_SC_OVERLAPPING_RANGE = 0x114,
RT_NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
RT_NVME_SC_NS_ID_UNAVAILABLE = 0x116,
RT_NVME_SC_NS_ALREADY_ATTACHED = 0x118,
RT_NVME_SC_NS_IS_PRIVATE = 0x119,
RT_NVME_SC_NS_NOT_ATTACHED = 0x11a,
RT_NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
RT_NVME_SC_CTRL_LIST_INVALID = 0x11c,
RT_NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
RT_NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
RT_NVME_SC_CTRL_ID_INVALID = 0x11f,
RT_NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
RT_NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
RT_NVME_SC_RES_ID_INVALID = 0x122,
RT_NVME_SC_PMR_SAN_PROHIBITED = 0x123,
RT_NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
RT_NVME_SC_ANA_ATTACH_FAILED = 0x125,
/*
* I/O Command Set Specific - NVM commands:
*/
RT_NVME_SC_BAD_ATTRIBUTES = 0x180,
RT_NVME_SC_INVALID_PI = 0x181,
RT_NVME_SC_READ_ONLY = 0x182,
RT_NVME_SC_ONCS_NOT_SUPPORTED = 0x183,
/*
* I/O Command Set Specific - Fabrics commands:
*/
RT_NVME_SC_CONNECT_FORMAT = 0x180,
RT_NVME_SC_CONNECT_CTRL_BUSY = 0x181,
RT_NVME_SC_CONNECT_INVALID_PARAM = 0x182,
RT_NVME_SC_CONNECT_RESTART_DISC = 0x183,
RT_NVME_SC_CONNECT_INVALID_HOST = 0x184,
RT_NVME_SC_DISCOVERY_RESTART = 0x190,
RT_NVME_SC_AUTH_REQUIRED = 0x191,
/*
* I/O Command Set Specific - Zoned commands:
*/
RT_NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
RT_NVME_SC_ZONE_FULL = 0x1b9,
RT_NVME_SC_ZONE_READ_ONLY = 0x1ba,
RT_NVME_SC_ZONE_OFFLINE = 0x1bb,
RT_NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
RT_NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
RT_NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
RT_NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,
/*
* Media and Data Integrity Errors:
*/
RT_NVME_SC_WRITE_FAULT = 0x280,
RT_NVME_SC_READ_ERROR = 0x281,
RT_NVME_SC_GUARD_CHECK = 0x282,
RT_NVME_SC_APPTAG_CHECK = 0x283,
RT_NVME_SC_REFTAG_CHECK = 0x284,
RT_NVME_SC_COMPARE_FAILED = 0x285,
RT_NVME_SC_ACCESS_DENIED = 0x286,
RT_NVME_SC_UNWRITTEN_BLOCK = 0x287,
/*
* Path-related Errors:
*/
RT_NVME_SC_INTERNAL_PATH_ERROR = 0x300,
RT_NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
RT_NVME_SC_ANA_INACCESSIBLE = 0x302,
RT_NVME_SC_ANA_TRANSITION = 0x303,
RT_NVME_SC_CTRL_PATH_ERROR = 0x360,
RT_NVME_SC_HOST_PATH_ERROR = 0x370,
RT_NVME_SC_HOST_ABORTED_CMD = 0x371,
RT_NVME_SC_CRD = 0x1800,
RT_NVME_SC_MORE = 0x2000,
RT_NVME_SC_DNR = 0x4000,
};
/* Admin commands */
enum
{
RT_NVME_ADMIN_OPCODE_DELETE_SQ = 0x00,
RT_NVME_ADMIN_OPCODE_CREATE_SQ = 0x01,
RT_NVME_ADMIN_OPCODE_GET_LOG_PAGE = 0x02,
RT_NVME_ADMIN_OPCODE_DELETE_CQ = 0x04,
RT_NVME_ADMIN_OPCODE_CREATE_CQ = 0x05,
RT_NVME_ADMIN_OPCODE_IDENTIFY = 0x06,
RT_NVME_ADMIN_OPCODE_ABORT_CMD = 0x08,
RT_NVME_ADMIN_OPCODE_SET_FEATURES = 0x09,
RT_NVME_ADMIN_OPCODE_GET_FEATURES = 0x0a,
RT_NVME_ADMIN_OPCODE_ASYNC_EVENT = 0x0c,
RT_NVME_ADMIN_OPCODE_NS_MGMT = 0x0d,
RT_NVME_ADMIN_OPCODE_ACTIVATE_FW = 0x10,
RT_NVME_ADMIN_OPCODE_DOWNLOAD_FW = 0x11,
RT_NVME_ADMIN_OPCODE_DEV_SELF_TEST = 0x14,
RT_NVME_ADMIN_OPCODE_NS_ATTACH = 0x15,
RT_NVME_ADMIN_OPCODE_KEEP_ALIVE = 0x18,
RT_NVME_ADMIN_OPCODE_DIRECTIVE_SEND = 0x19,
RT_NVME_ADMIN_OPCODE_DIRECTIVE_RECV = 0x1a,
RT_NVME_ADMIN_OPCODE_VIRTUAL_MGMT = 0x1c,
RT_NVME_ADMIN_OPCODE_NVME_MI_SEND = 0x1d,
RT_NVME_ADMIN_OPCODE_NVME_MI_RECV = 0x1e,
RT_NVME_ADMIN_OPCODE_DBBUF = 0x7c,
RT_NVME_ADMIN_OPCODE_FORMAT_NVM = 0x80,
RT_NVME_ADMIN_OPCODE_SECURITY_SEND = 0x81,
RT_NVME_ADMIN_OPCODE_SECURITY_RECV = 0x82,
RT_NVME_ADMIN_OPCODE_SANITIZE_NVM = 0x84,
RT_NVME_ADMIN_OPCODE_GET_LBA_STATUS = 0x86,
RT_NVME_ADMIN_OPCODE_VENDOR_START = 0xc0,
};
/* I/O commands */
enum
{
RT_NVME_CMD_FLUSH = 0x00,
RT_NVME_CMD_WRITE = 0x01,
RT_NVME_CMD_READ = 0x02,
RT_NVME_CMD_WRITE_UNCOR = 0x04,
RT_NVME_CMD_COMPARE = 0x05,
RT_NVME_CMD_WRITE_ZEROES = 0x08,
RT_NVME_CMD_DSM = 0x09,
RT_NVME_CMD_VERIFY = 0x0c,
RT_NVME_CMD_RESV_REGISTER = 0x0d,
RT_NVME_CMD_RESV_REPORT = 0x0e,
RT_NVME_CMD_RESV_ACQUIRE = 0x11,
RT_NVME_CMD_RESV_RELEASE = 0x15,
RT_NVME_CMD_ZONE_MGMT_SEND = 0x79,
RT_NVME_CMD_ZONE_MGMT_RECV = 0x7a,
RT_NVME_CMD_ZONE_APPEND = 0x7d,
RT_NVME_CMD_VENDOR_START = 0x80,
};
enum
{
RT_NVME_PSDT_PRP = 0x0,
RT_NVME_PSDT_SGL_MPTR_CONTIGUOUS = 0x1,
RT_NVME_PSDT_SGL_MPTR_SGL = 0x2,
};
/* Commands flags */
enum
{
RT_NVME_CMD_FLAGS_FUSE_SHIFT = 0x00,
RT_NVME_CMD_FLAGS_PSDT_SHIFT = 0x06,
};
struct rt_nvme_command_common
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
rt_le32_t nsid;
rt_le32_t cmd_dw2[2];
rt_le64_t metadata;
rt_le64_t prp1;
rt_le64_t prp2;
rt_le32_t cmd_dw10[6];
};
rt_packed(struct rt_nvme_sgl_desc
{
rt_le64_t adddress;
rt_le32_t length;
rt_uint8_t reserved[3];
#define SGL_DESC_TYPE_DATA_BLOCK 0x0
#define SGL_DESC_TYPE_BIT_BUCKET 0x1
#define SGL_DESC_TYPE_SEGMENT 0x2
#define SGL_DESC_TYPE_LAST_SEGMENT 0x3
#define SGL_DESC_TYPE_KEYED_DATA_BLOCK 0x4
#define SGL_DESC_TYPE_VENDOR_SPECIFIC 0xf
rt_uint8_t sgl_identify;
});
struct rt_nvme_command_rw
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
rt_le32_t nsid;
NVME_RSVD(8, 8);
rt_le64_t metadata;
union
{
struct
{
rt_le64_t prp1;
rt_le64_t prp2;
};
struct rt_nvme_sgl_desc sgl;
};
rt_le64_t slba;
rt_le16_t length;
rt_le16_t control;
rt_le32_t dsmgmt;
rt_le32_t reftag;
rt_le16_t apptag;
rt_le16_t appmask;
};
enum
{
RT_NVME_RW_LR = 1 << 15,
RT_NVME_RW_FUA = 1 << 14,
RT_NVME_RW_APPEND_PIREMAP = 1 << 9,
RT_NVME_RW_DSM_FREQ_UNSPEC = 0,
RT_NVME_RW_DSM_FREQ_TYPICAL = 1,
RT_NVME_RW_DSM_FREQ_RARE = 2,
RT_NVME_RW_DSM_FREQ_READS = 3,
RT_NVME_RW_DSM_FREQ_WRITES = 4,
RT_NVME_RW_DSM_FREQ_RW = 5,
RT_NVME_RW_DSM_FREQ_ONCE = 6,
RT_NVME_RW_DSM_FREQ_PREFETCH = 7,
RT_NVME_RW_DSM_FREQ_TEMP = 8,
RT_NVME_RW_DSM_LATENCY_NONE = 0 << 4,
RT_NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
RT_NVME_RW_DSM_LATENCY_NORM = 2 << 4,
RT_NVME_RW_DSM_LATENCY_LOW = 3 << 4,
RT_NVME_RW_DSM_SEQ_REQ = 1 << 6,
RT_NVME_RW_DSM_COMPRESSED = 1 << 7,
RT_NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
RT_NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
RT_NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
RT_NVME_RW_PRINFO_PRACT = 1 << 13,
RT_NVME_RW_DTYPE_STREAMS = 1 << 4,
RT_NVME_WZ_DEAC = 1 << 9,
};
enum
{
RT_NVME_QUEUE_PHYS_CONTIG = (1 << 0),
RT_NVME_CQ_IRQ_ENABLED = (1 << 1),
RT_NVME_SQ_PRIO_URGENT = (0 << 1),
RT_NVME_SQ_PRIO_HIGH = (1 << 1),
RT_NVME_SQ_PRIO_MEDIUM = (2 << 1),
RT_NVME_SQ_PRIO_LOW = (3 << 1),
RT_NVME_FEAT_ARBITRATION = 0x01,
RT_NVME_FEAT_POWER_MGMT = 0x02,
RT_NVME_FEAT_LBA_RANGE = 0x03,
RT_NVME_FEAT_TEMP_THRESH = 0x04,
RT_NVME_FEAT_ERR_RECOVERY = 0x05,
RT_NVME_FEAT_VOLATILE_WC = 0x06,
RT_NVME_FEAT_NUM_QUEUES = 0x07,
RT_NVME_FEAT_IRQ_COALESCE = 0x08,
RT_NVME_FEAT_IRQ_CONFIG = 0x09,
RT_NVME_FEAT_WRITE_ATOMIC = 0x0a,
RT_NVME_FEAT_ASYNC_EVENT = 0x0b,
RT_NVME_FEAT_AUTO_PST = 0x0c,
RT_NVME_FEAT_SW_PROGRESS = 0x80,
RT_NVME_FEAT_HOST_ID = 0x81,
RT_NVME_FEAT_RESV_MASK = 0x82,
RT_NVME_FEAT_RESV_PERSIST = 0x83,
RT_NVME_LOG_ERROR = 0x01,
RT_NVME_LOG_SMART = 0x02,
RT_NVME_LOG_FW_SLOT = 0x03,
RT_NVME_LOG_RESERVATION = 0x80,
RT_NVME_FWACT_REPL = (0 << 3),
RT_NVME_FWACT_REPL_ACTV = (1 << 3),
RT_NVME_FWACT_ACTV = (2 << 3),
};
struct rt_nvme_command_identify
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
rt_le32_t nsid;
NVME_RSVD(8, 16);
rt_le64_t prp1;
rt_le64_t prp2;
rt_le32_t cns;
NVME_RSVD(64, 20);
};
struct rt_nvme_command_features
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
rt_le32_t nsid;
NVME_RSVD(8, 16);
rt_le64_t prp1;
rt_le64_t prp2;
rt_le32_t fid;
rt_le32_t dword11;
NVME_RSVD(68, 16);
};
struct rt_nvme_command_create_cq
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
NVME_RSVD(4, 20);
rt_le64_t prp1;
NVME_RSVD(32, 8);
rt_le16_t cqid;
rt_le16_t qsize;
rt_le16_t cq_flags;
rt_le16_t irq_vector;
NVME_RSVD(104, 16);
};
struct rt_nvme_command_create_sq
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
NVME_RSVD(4, 20);
rt_le64_t prp1;
NVME_RSVD(32, 8);
rt_le16_t sqid;
rt_le16_t qsize;
rt_le16_t sq_flags;
rt_le16_t cqid;
NVME_RSVD(104, 16);
};
struct rt_nvme_command_delete_queue
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
NVME_RSVD(4, 36);
rt_le16_t qid;
NVME_RSVD(42, 22);
};
struct rt_nvme_command_write_zeroes
{
rt_uint8_t opcode;
rt_uint8_t flags;
rt_uint16_t cmdid;
rt_le32_t nsid;
NVME_RSVD(8, 8);
rt_le64_t metadata;
rt_le64_t prp1;
rt_le64_t prp2;
rt_le64_t slba;
rt_le16_t length;
rt_le16_t control;
rt_le32_t dsmgmt;
rt_le32_t reftag;
rt_le16_t apptag;
rt_le16_t appmask;
};
struct rt_nvme_command
{
union
{
struct rt_nvme_command_common common;
struct rt_nvme_command_rw rw;
struct rt_nvme_command_identify identify;
struct rt_nvme_command_features features;
struct rt_nvme_command_create_cq create_cq;
struct rt_nvme_command_create_sq create_sq;
struct rt_nvme_command_delete_queue delete_queue;
struct rt_nvme_command_write_zeroes write_zeroes;
};
};
struct rt_nvme_completion
{
union
{
rt_le16_t u16;
rt_le32_t u32;
rt_le64_t u64;
} result;
rt_le16_t sq_head; /* How much of this queue may be reclaimed */
rt_le16_t sq_id; /* Submission queue that generated this entry */
rt_uint16_t cmdid; /* Which command completed */
rt_le16_t status; /* Command status */
};
enum
{
RT_NVME_REG_CAP = 0x0000, /* Controller Capabilities */
RT_NVME_REG_VS = 0x0008, /* Version */
RT_NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
RT_NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
RT_NVME_REG_CC = 0x0014, /* Controller Configuration */
RT_NVME_REG_CSTS = 0x001c, /* Controller Status */
RT_NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
RT_NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
RT_NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
RT_NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
RT_NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
RT_NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
RT_NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
RT_NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
RT_NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer Location */
RT_NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory Space Control */
RT_NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
RT_NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
RT_NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
RT_NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
RT_NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity Buffer Size */
RT_NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained Write Throughput */
RT_NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
#define RT_NVME_CAP_MQES(cap) ((cap) & 0xffff)
#define RT_NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define RT_NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define RT_NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define RT_NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
#define RT_NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
#define RT_NVME_AQ_DEPTH 32
#define RT_NVME_NR_AEN_COMMANDS 1
#define RT_NVME_AQ_BLK_MQ_DEPTH (RT_NVME_AQ_DEPTH - RT_NVME_NR_AEN_COMMANDS)
#define RT_NVME_AQ_MQ_TAG_DEPTH (RT_NVME_AQ_BLK_MQ_DEPTH - 1)
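/*
 * Doorbell layout note (per the NVMe base specification): with
 * stride = RT_NVME_CAP_STRIDE(cap), the tail doorbell of submission queue y
 * lives at RT_NVME_REG_DBS + (2 * y) * (4 << stride), and the head doorbell
 * of the matching completion queue at RT_NVME_REG_DBS + (2 * y + 1) * (4 << stride).
 */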
enum
{
RT_NVME_CC_ENABLE = 1 << 0,
RT_NVME_CC_CSS_NVM = 0 << 4,
RT_NVME_CC_MPS_SHIFT = 7,
RT_NVME_CC_ARB_RR = 0 << 11,
RT_NVME_CC_ARB_WRRU = 1 << 11,
RT_NVME_CC_ARB_VS = 7 << 11,
RT_NVME_CC_SHN_NONE = 0 << 14,
RT_NVME_CC_SHN_NORMAL = 1 << 14,
RT_NVME_CC_SHN_ABRUPT = 2 << 14,
RT_NVME_CC_SHN_MASK = 3 << 14,
RT_NVME_CC_IOSQES = 6 << 16,
RT_NVME_CC_IOCQES = 4 << 20,
RT_NVME_CSTS_RDY = 1 << 0,
RT_NVME_CSTS_CFS = 1 << 1,
RT_NVME_CSTS_SHST_NORMAL = 0 << 2,
RT_NVME_CSTS_SHST_OCCUR = 1 << 2,
RT_NVME_CSTS_SHST_CMPLT = 2 << 2,
RT_NVME_CSTS_SHST_MASK = 3 << 2,
};
rt_packed(struct rt_nvme_id_power_state
{
rt_le16_t mp; /* Maximum Power */
NVME_RSVD(1, 1);
rt_uint8_t mxps_nops; /* Max Power Scale, Non-Operational State */
rt_le32_t enlat; /* Entry Latency: microseconds */
rt_le32_t exlat; /* Exit Latency: microseconds */
rt_uint8_t rrt; /* Relative Read Throughput */
rt_uint8_t rrl; /* Relative Read Latency */
rt_uint8_t rwt; /* Relative Write Throughput */
rt_uint8_t rwl; /* Relative Write Latency */
rt_le16_t idlp; /* Idle Power */
rt_uint8_t ips; /* Idle Power Scale */
NVME_RSVD(19, 1);
rt_le16_t actp; /* Active Power */
rt_uint8_t apw_aps; /* Active Power Workload, Active Power Scale */
NVME_RSVD(23, 9);
});
rt_packed(struct rt_nvme_id_ctrl
{
/* Controller Capabilities and Features */
rt_le16_t vid; /* PCI Vendor ID */
rt_le16_t ssvid; /* PCI Subsystem Vendor */
char sn[20]; /* Serial Number */
char mn[40]; /* Model Number */
char fr[8]; /* Firmware Revision */
rt_uint8_t rab; /* Recommended Arbitration Burst */
rt_uint8_t ieee[3]; /* IEEE OUI Identifier */
rt_uint8_t mic; /* Controller Multi-Path I/O and Namespace Sharing Capabilities */
rt_uint8_t mdts; /* Maximum Data Transfer Size */
rt_uint16_t cntlid; /* Controller ID */
rt_uint32_t ver; /* Version */
rt_uint32_t rtd3r; /* RTD3 Resume Latency */
rt_uint32_t rtd3e; /* RTD3 Entry Latency */
rt_uint32_t oaes; /* Optional Asynchronous Events Supported */
#define RT_NVME_ID_CTRATT_ELBAS 15 /* Extended LBA Formats Supported */
#define RT_NVME_ID_CTRATT_DNVMS 14 /* Delete NVM Set */
#define RT_NVME_ID_CTRATT_DEG 13 /* Delete Endurance Group */
#define RT_NVME_ID_CTRATT_VCM 12 /* Variable Capacity Management */
#define RT_NVME_ID_CTRATT_FCM 11 /* Fixed Capacity Management */
#define RT_NVME_ID_CTRATT_MDS 10 /* Multi-Domain Subsystem */
#define RT_NVME_ID_CTRATT_UUIDL 9 /* UUID List */
#define RT_NVME_ID_CTRATT_SQA 8 /* SQ Associations */
#define RT_NVME_ID_CTRATT_NG 7 /* Namespace Granularity */
#define RT_NVME_ID_CTRATT_TBKAS 6 /* Traffic Based Keep Alive Support */
#define RT_NVME_ID_CTRATT_PLM 5 /* Predictable Latency Mode */
#define RT_NVME_ID_CTRATT_EG 4 /* Endurance Groups */
#define RT_NVME_ID_CTRATT_RRL 3 /* Read Recovery Levels */
#define RT_NVME_ID_CTRATT_NVMS 2 /* NVM Sets */
#define RT_NVME_ID_CTRATT_NOPSPM 1 /* Non-Operational Power State Permissive Mode */
#define RT_NVME_ID_CTRATT_HIS 0 /* Host Identifier Support */
rt_uint32_t ctratt; /* Controller Attributes */
rt_uint16_t rrls; /* Read Recovery Levels Supported */
NVME_RSVD(102, 9);
rt_uint8_t cntrltype; /* Controller Type */
rt_uint8_t fguid[16]; /* FRU Globally Unique Identifier */
rt_uint16_t crdt1; /* Command Retry Delay Time 1 */
rt_uint16_t crdt2; /* Command Retry Delay Time 2 */
rt_uint16_t crdt3; /* Command Retry Delay Time 3 */
NVME_RSVD(134, 119);
#define RT_NVME_ID_NVMSR_NVMEE 1 /* NVMe Enclosure */
#define RT_NVME_ID_NVMSR_NVMESD 0 /* NVMe Storage Device */
rt_uint8_t nvmsr; /* NVM Subsystem Report */
#define RT_NVME_ID_VWCI_VWCRV 7 /* VPD Write Cycles Remaining Valid */
#define RT_NVME_ID_VWCI_VWCR 0 /* VPD Write Cycles Remaining */
rt_uint8_t vwci; /* VPD Write Cycle Information */
#define RT_NVME_ID_MEC_PCIEME 1 /* PCIe Port Management Endpoint */
#define RT_NVME_ID_MEC_SMBUSME 0 /* SMBus/I2C Port Management Endpoint */
rt_uint8_t mec; /* Management Endpoint Capabilities */
/* Admin Command Set Attributes & Optional Controller Capabilities */
rt_le16_t oacs; /* Optional Admin Command Support */
rt_uint8_t acl; /* Abort Command Limit */
rt_uint8_t aerl; /* Asynchronous Event Request Limit */
#define RT_NVME_ID_FRMW_SMUD 5 /* Support Multiple Update Detection */
#define RT_NVME_ID_FRMW_FAWR 4 /* Firmware Activation Without Reset */
#define RT_NVME_ID_FRMW_NOFS 1 /* Number Of Firmware Slots */
#define RT_NVME_ID_FRMW_FFSRO 0 /* First Firmware Slot Read Only */
rt_uint8_t frmw; /* Firmware Updates */
rt_uint8_t lpa; /* Log Page Attributes */
rt_uint8_t elpe; /* Error Log Page Entries */
rt_uint8_t npss; /* Number of Power States Support */
rt_uint8_t avscc; /* Admin Vendor Specific Command Configuration */
rt_uint8_t apsta; /* Autonomous Power State Transition Attributes */
rt_le16_t wctemp; /* Warning Composite Temperature Threshold */
rt_le16_t cctemp; /* Critical Composite Temperature Threshold */
rt_uint16_t mtfa; /* Maximum Time for Firmware Activation */
rt_uint32_t hmpre; /* Host Memory Buffer Preferred Size */
rt_uint32_t hmmin; /* Host Memory Buffer Minimum Size */
rt_uint8_t tnvmcap[16]; /* Total NVM Capacity */
rt_uint8_t unvmcap[16]; /* Unallocated NVM Capacity */
#define RT_NVME_ID_RPMBS_ASZ 24 /* Access Size */
#define RT_NVME_ID_RPMBS_TSZ 16 /* Total Size */
#define RT_NVME_ID_RPMBS_AM 3 /* Authentication Method */
#define RT_NVME_ID_RPMBS_NORPMBU 2 /* Number of RPMB Units */
rt_uint32_t rpmbs; /* Replay Protected Memory Block Support */
rt_uint16_t edstt; /* Extended Device Self-test Time */
rt_uint8_t dsto; /* Device Self-test Options */
rt_uint8_t fwug; /* Firmware Update Granularity */
rt_uint16_t kas; /* Keep Alive Support */
rt_uint16_t hctma; /* Host Controlled Thermal Management Attributes */
rt_uint16_t mntmt; /* Minimum Thermal Management Temperature */
rt_uint16_t mxtmt; /* Maximum Thermal Management Temperature */
#define RT_NVME_ID_SANICAP_NODMMAS 30 /* No-Deallocate Modifies Media After Sanitize */
#define RT_NVME_ID_SANICAP_NDI 29 /* No-Deallocate Inhibited */
#define RT_NVME_ID_SANICAP_OWS 2 /* Overwrite Support */
#define RT_NVME_ID_SANICAP_BES 1 /* Block Erase Support */
#define RT_NVME_ID_SANICAP_CES 0 /* Crypto Erase Support */
rt_uint32_t sanicap; /* Sanitize Capabilities */
rt_uint32_t hmminds; /* Host Memory Buffer Minimum Descriptor Entry Size */
rt_uint16_t hmmaxd; /* Host Memory Maximum Descriptors Entries */
rt_uint16_t nsetidmax; /* NVM Set Identifier Maximum */
rt_uint16_t endgidmax; /* Endurance Group Identifier Maximum */
rt_uint8_t anatt; /* ANA Transition Time */
rt_uint8_t anacap; /* Asymmetric Namespace Access Capabilities */
rt_uint32_t anagrpmax; /* ANA Group Identifier Maximum */
rt_uint32_t nanagrpid; /* Number of ANA Group Identifiers */
rt_uint32_t pels; /* Persistent Event Log Size */
rt_uint16_t dmid; /* Domain Identifier */
NVME_RSVD(358, 10);
rt_uint8_t megcap[16]; /* Max Endurance Group Capacity */
NVME_RSVD(384, 128);
/* NVM Command Set Attributes */
rt_uint8_t sqes; /* Submission Queue Entry Size */
rt_uint8_t cqes; /* Completion Queue Entry Size */
rt_le16_t maxcmd; /* Maximum Outstanding Commands */
rt_le32_t nn; /* Number of Namespaces */
rt_le16_t oncs; /* Optional NVM Command Support */
rt_le16_t fuses; /* Fused Operation Support */
rt_uint8_t fna; /* Format NVM Attributes */
rt_uint8_t vwc; /* Volatile Write Cache */
rt_le16_t awun; /* Atomic Write Unit Normal */
rt_le16_t awupf; /* Atomic Write Unit Power Fail */
rt_uint8_t nvscc; /* I/O Command Set Vendor Specific Command Configuration */
rt_uint8_t nwpc; /* Namespace Write Protection Capabilities */
rt_le16_t acwu; /* Atomic Compare & Write Unit */
rt_le16_t cdfs; /* Copy Descriptor Formats Supported */
#define RT_NVME_ID_SGL_SUPPORT_MASK 0x3
rt_le32_t sgls; /* SGL Support */
rt_uint32_t mnan; /* Maximum Number of Allowed Namespaces */
char maxdna[16]; /* Maximum Domain Namespace Attachments */
rt_le32_t maxcna; /* Maximum I/O Controller Namespace Attachments */
NVME_RSVD(564, 204);
rt_uint8_t subnqn[256]; /* NVM Subsystem NVMe Qualified Name */
NVME_RSVD(1024, 768);
rt_le32_t ioccsz; /* I/O Queue Command Capsule Supported Size */
rt_le32_t iorcsz; /* I/O Queue Response Capsule Supported Size */
rt_le16_t icdoff; /* In Capsule Data Offset */
rt_uint8_t ctrattr; /* Fabrics Controller Attributes */
rt_uint8_t msdbd; /* Maximum SGL Data Block Descriptors */
rt_le16_t ofcs; /* Optional Fabric Commands Support */
rt_uint8_t dctype; /* Discovery Controller Type */
NVME_RSVD(1807, 241);
/* Power State Descriptors */
struct rt_nvme_id_power_state psd[32];
/* Vendor Specific */
rt_uint8_t vs[1024];
});
enum
{
RT_NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
RT_NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
RT_NVME_CTRL_CMIC_ANA = 1 << 3,
RT_NVME_CTRL_ONCS_COMPARE = 1 << 0,
RT_NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
RT_NVME_CTRL_ONCS_DSM = 1 << 2,
RT_NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
RT_NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
RT_NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
RT_NVME_CTRL_VWC_PRESENT = 1 << 0,
RT_NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
RT_NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
RT_NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
RT_NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
RT_NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
RT_NVME_CTRL_CTRATT_128_ID = 1 << 0,
RT_NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
RT_NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
RT_NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
RT_NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
RT_NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
RT_NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
RT_NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct rt_nvme_lba_format
{
rt_le16_t ms; /* Metadata size */
rt_uint8_t ds; /* Data size */
rt_uint8_t rp; /* Relative performance */
};
rt_packed(struct rt_nvme_id_ns
{
rt_le64_t nsze; /* Namespace size */
rt_le64_t ncap; /* Namespace capacity */
rt_le64_t nuse; /* Namespace utilization */
rt_uint8_t nsfeat; /* Namespace features */
rt_uint8_t nlbaf; /* Number of lba formats */
rt_uint8_t flbas; /* Formatted lba size */
rt_uint8_t mc; /* Metadata capabilities */
rt_uint8_t dpc; /* End-to-end data protection capabilities */
rt_uint8_t dps; /* End-to-end data protection type settings */
rt_uint8_t nmic; /* Namespace Multi-path I/O and Namespace Sharing Capabilities */
rt_uint8_t rescap; /* Reservation Capabilities */
rt_uint8_t fpi; /* Format Progress Indicator */
rt_uint8_t dlfeat; /* Deallocate Logical Block Features */
rt_le16_t nawun; /* Namespace Atomic Write Unit Normal */
rt_le16_t nawupf; /* Namespace Atomic Write Unit Power Fail */
rt_le16_t nacwu; /* Namespace Atomic Compare & Write Unit */
rt_le16_t nabsn; /* Namespace Atomic Boundary Size Normal */
rt_le16_t nabo; /* Namespace Atomic Boundary Offset */
rt_le16_t nabspf; /* Namespace Atomic Boundary Size Power Fail */
rt_uint16_t noiob; /* Namespace Optimal IO Boundary */
rt_le64_t nvmcap[2]; /* NVMe Capacity */
rt_uint16_t npwg; /* Namespace Preferred Write Granularity */
rt_uint16_t npwa; /* Namespace Preferred Write Alignment */
rt_uint16_t npdg; /* Namespace Preferred Deallocate Granularity */
rt_uint16_t npda; /* Namespace Preferred Deallocate Alignment */
rt_uint16_t nows; /* Namespace Optimal Write Size */
NVME_RSVD(118, 18);
rt_uint32_t anagrpid; /* ANA Group Identifier */
NVME_RSVD(139, 3);
rt_uint8_t nsattr; /* Namespace Attributes */
rt_uint16_t nvmsetid; /* NVMe Set Identifier */
rt_uint16_t endgid; /* Endurance Group Identifier */
rt_uint8_t nguid[16]; /* Namespace Globally Unique Identifier */
rt_uint8_t eui64[8]; /* IEEE Extended Unique Identifier */
/* Logical Block Address Format */
struct rt_nvme_lba_format lbaf[16];
NVME_RSVD(171, 192);
/* Vendor specific */
rt_uint8_t vs[3712];
});
enum
{
RT_NVME_NS_FEAT_THIN = 1 << 0,
RT_NVME_NS_FLBAS_LBA_MASK = 0xf,
RT_NVME_NS_FLBAS_LBA_UMASK = 0x60,
RT_NVME_NS_FLBAS_LBA_SHIFT = 1,
RT_NVME_NS_FLBAS_META_EXT = 0x10,
RT_NVME_LBAF_RP_BEST = 0,
RT_NVME_LBAF_RP_BETTER = 1,
RT_NVME_LBAF_RP_GOOD = 2,
RT_NVME_LBAF_RP_DEGRADED = 3,
RT_NVME_NS_DPC_PI_LAST = 1 << 4,
RT_NVME_NS_DPC_PI_FIRST = 1 << 3,
RT_NVME_NS_DPC_PI_TYPE3 = 1 << 2,
RT_NVME_NS_DPC_PI_TYPE2 = 1 << 1,
RT_NVME_NS_DPC_PI_TYPE1 = 1 << 0,
RT_NVME_NS_DPS_PI_FIRST = 1 << 3,
RT_NVME_NS_DPS_PI_MASK = 0x7,
RT_NVME_NS_DPS_PI_TYPE1 = 1,
RT_NVME_NS_DPS_PI_TYPE2 = 2,
RT_NVME_NS_DPS_PI_TYPE3 = 3,
};
struct rt_nvme_ops;
struct rt_nvme_controller;
/*
* An NVM Express queue. Each device has at least two (one for admin commands
* and one for I/O commands).
*/
struct rt_nvme_queue
{
struct rt_nvme_controller *nvme;
struct rt_nvme_command *sq_cmds;
struct rt_nvme_completion *cq_entry;
rt_ubase_t sq_cmds_phy;
rt_ubase_t cq_entry_phy;
rt_uint32_t *doorbell;
rt_uint16_t qid;
rt_uint16_t depth;
rt_uint16_t sq_head;
rt_uint16_t sq_tail;
rt_uint16_t cq_head;
rt_uint16_t cq_phase;
rt_err_t err;
struct rt_nvme_command *cmd;
struct rt_completion done;
struct rt_spinlock lock;
};
struct rt_nvme_controller
{
rt_list_t list;
struct rt_device *dev;
int nvme_id;
char name[RT_NAME_MAX];
void *regs;
rt_uint64_t cap;
rt_uint32_t page_shift;
rt_uint32_t page_size;
rt_uint32_t queue_depth;
rt_uint32_t io_queue_max;
rt_uint32_t ctrl_config;
rt_uint32_t max_transfer_shift:8;
rt_uint32_t volatile_write_cache:8;
rt_uint32_t write_zeroes:1;
rt_uint32_t sgl_mode:2;
rt_uint32_t doorbell_stride;
rt_uint32_t *doorbell_tbl;
const struct rt_nvme_ops *ops;
#define RT_USING_NVME_QUEUE (1 + (RT_USING_NVME_IO_QUEUE * RT_CPUS_NR))
int irqs_nr;
int irqs[RT_USING_NVME_QUEUE];
union
{
struct
{
struct rt_nvme_queue admin_queue;
struct rt_nvme_queue io_queues[RT_USING_NVME_IO_QUEUE * RT_CPUS_NR];
};
struct rt_nvme_queue queue[RT_USING_NVME_QUEUE];
};
volatile rt_atomic_t cmdid;
volatile rt_atomic_t ioqid[RT_CPUS_NR];
rt_list_t ns_nodes;
};
struct rt_nvme_device
{
struct rt_blk_disk parent;
struct rt_nvme_controller *ctrl;
rt_list_t list;
rt_uint32_t nsid;
rt_uint32_t lba_shift;
struct rt_nvme_id_ns id;
};
#define rt_disk_to_nvme_device(disk) rt_container_of(disk, struct rt_nvme_device, parent)
struct rt_nvme_ops
{
const char *name;
/* Controller-specific NVM Express queue setup */
rt_err_t (*setup_queue)(struct rt_nvme_queue *queue);
/* Controller-specific NVM Express queue cleanup */
rt_err_t (*cleanup_queue)(struct rt_nvme_queue *queue);
/* Controller-specific NVM Express command submission */
rt_err_t (*submit_cmd)(struct rt_nvme_queue *queue, struct rt_nvme_command *cmd);
/* Controller-specific NVM Express command completion */
void (*complete_cmd)(struct rt_nvme_queue *queue, struct rt_nvme_command *cmd);
};
rt_err_t rt_nvme_controller_register(struct rt_nvme_controller *nvme);
rt_err_t rt_nvme_controller_unregister(struct rt_nvme_controller *nvme);
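/*
 * Minimal registration sketch (illustrative only; the "demo" names and the
 * probe signature are hypothetical, and interrupt/queue setup is omitted):
 * a transport driver maps the controller registers, fills in a
 * struct rt_nvme_controller and hands it to the core.
 *
 *     static const struct rt_nvme_ops demo_nvme_ops =
 *     {
 *         .name = "demo",
 *     };
 *
 *     static rt_err_t demo_nvme_probe(struct rt_device *dev, void *regs)
 *     {
 *         struct rt_nvme_controller *nvme = rt_calloc(1, sizeof(*nvme));
 *         if (!nvme)
 *         {
 *             return -RT_ENOMEM;
 *         }
 *         nvme->dev  = dev;
 *         nvme->regs = regs;
 *         nvme->ops  = &demo_nvme_ops;
 *         return rt_nvme_controller_register(nvme);
 *     }
 */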
#endif /* __NVME_H__ */

View File

@@ -37,6 +37,7 @@ struct rt_ofw_node
/* phandles range from 1 to 2^32-2 (0xfffffffe) */
rt_phandle phandle;
struct rt_device *dev;
struct rt_ofw_prop *props;
struct rt_ofw_node *parent;
struct rt_ofw_node *child;
@@ -376,9 +377,9 @@ rt_inline rt_bool_t rt_ofw_node_is_type(const struct rt_ofw_node *np, const char
for (np = rt_ofw_find_node_by_type(RT_NULL, type); np; \
np = rt_ofw_find_node_by_type(np, type))
#define rt_ofw_foreach_node_by_compatible(np, type, compatible) \
for (np = rt_ofw_find_node_by_compatible(RT_NULL, type, compatible); np; \
np = rt_ofw_find_node_by_compatible(np, type, compatible))
#define rt_ofw_foreach_node_by_compatible(np, compatible) \
for (np = rt_ofw_find_node_by_compatible(RT_NULL, compatible); np; \
np = rt_ofw_find_node_by_compatible(np, compatible))
#define rt_ofw_foreach_node_by_ids_r(np, id, ids) \
for (np = rt_ofw_find_node_by_ids_r(RT_NULL, ids, id); \

View File

@@ -70,6 +70,7 @@ rt_err_t rt_fdt_boot_dump(void);
void rt_fdt_earlycon_output(const char *str);
void rt_fdt_earlycon_kick(int why);
rt_err_t rt_fdt_scan_chosen_stdout(void);
rt_err_t rt_fdt_bootargs_select(const char *key, int index, const char **out_result);
rt_err_t rt_fdt_unflatten(void);
struct rt_ofw_node *rt_fdt_unflatten_single(void *fdt);

View File

@@ -26,6 +26,27 @@ rt_err_t rt_ofw_get_address_by_name(struct rt_ofw_node *np, const char *name,
int rt_ofw_get_address_array(struct rt_ofw_node *np, int nr, rt_uint64_t *out_regs);
rt_uint64_t rt_ofw_translate_address(struct rt_ofw_node *np, const char *range_type, rt_uint64_t address);
rt_uint64_t rt_ofw_reverse_address(struct rt_ofw_node *np, const char *range_type, rt_uint64_t address);
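/*
 * The two helpers below convert between the CPU's and a device's view of an
 * address by walking "dma-ranges" and "ranges": dma2cpu maps a device (DMA)
 * address back to a CPU physical address, cpu2dma does the reverse, and both
 * fall back to the input address when no translation exists (~0ULL result).
 */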
rt_inline rt_uint64_t rt_ofw_translate_dma2cpu(struct rt_ofw_node *np, rt_uint64_t address)
{
rt_uint64_t bus_addr, cpu_addr;
bus_addr = rt_ofw_reverse_address(np, "dma-ranges", address);
cpu_addr = rt_ofw_translate_address(np, "ranges", bus_addr);
return cpu_addr != ~0ULL ? cpu_addr : address;
}
rt_inline rt_uint64_t rt_ofw_translate_cpu2dma(struct rt_ofw_node *np, rt_uint64_t address)
{
rt_uint64_t bus_addr, dma_addr;
bus_addr = rt_ofw_reverse_address(np, "ranges", address);
dma_addr = rt_ofw_translate_address(np, "dma-ranges", bus_addr);
return dma_addr != ~0ULL ? dma_addr : address;
}
void *rt_ofw_iomap(struct rt_ofw_node *np, int index);
void *rt_ofw_iomap_by_name(struct rt_ofw_node *np, const char *name);

View File

@@ -0,0 +1,604 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_H__
#define __PCI_H__
#include <rtdef.h>
#include <bitmap.h>
#include <ioremap.h>
#include <drivers/ofw.h>
#include <drivers/pic.h>
#include <drivers/core/dm.h>
#include <drivers/core/driver.h>
#include "../../pci/pci_ids.h"
#include "../../pci/pci_regs.h"
#define RT_PCI_INTX_PIN_MAX 4
#define RT_PCI_BAR_NR_MAX 6
#define RT_PCI_DEVICE_MAX 32
#define RT_PCI_FUNCTION_MAX 8
#define RT_PCI_FIND_CAP_TTL 48
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
* in a single byte as follows:
*
* 7:3 = slot
* 2:0 = function
*/
#define RT_PCI_DEVID(bus, devfn) ((((rt_uint16_t)(bus)) << 8) | (devfn))
#define RT_PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define RT_PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define RT_PCI_FUNC(devfn) ((devfn) & 0x07)
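/*
 * Encoding sketch (values illustrative): device 3, function 1 on bus 2
 * packs as follows.
 *
 *     rt_uint32_t devfn = RT_PCI_DEVFN(3, 1);     // (3 << 3) | 1    == 0x19
 *     rt_uint16_t devid = RT_PCI_DEVID(2, devfn); // (2 << 8) | 0x19 == 0x219
 *     RT_PCI_SLOT(devfn);                         // 3
 *     RT_PCI_FUNC(devfn);                         // 1
 */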
#define PCIE_LINK_STATE_L0S RT_BIT(0)
#define PCIE_LINK_STATE_L1 RT_BIT(1)
#define PCIE_LINK_STATE_CLKPM RT_BIT(2)
#define PCIE_LINK_STATE_L1_1 RT_BIT(3)
#define PCIE_LINK_STATE_L1_2 RT_BIT(4)
#define PCIE_LINK_STATE_L1_1_PCIPM RT_BIT(5)
#define PCIE_LINK_STATE_L1_2_PCIPM RT_BIT(6)
#define PCIE_LINK_STATE_ALL \
( \
PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | \
PCIE_LINK_STATE_CLKPM | \
PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2 | \
PCIE_LINK_STATE_L1_1_PCIPM | PCIE_LINK_STATE_L1_2_PCIPM \
)
struct rt_pci_bus_region
{
rt_uint64_t phy_addr;
rt_uint64_t cpu_addr;
rt_uint64_t size;
rt_uint64_t bus_start;
#define PCI_BUS_REGION_F_NONE 0xffffffff /* PCI no memory */
#define PCI_BUS_REGION_F_MEM 0x00000000 /* PCI memory space */
#define PCI_BUS_REGION_F_IO 0x00000001 /* PCI IO space */
#define PCI_BUS_REGION_F_PREFETCH 0x00000008 /* Prefetchable PCI memory */
rt_ubase_t flags;
};
struct rt_pci_bus_resource
{
rt_ubase_t base;
rt_size_t size;
rt_ubase_t flags;
};
/*
* PCI topology:
*
* +-----+-----+ +-------------+ PCI Bus 0 +------------+ PCI Bus 1
* | RAM | CPU |---------| Host Bridge |--------+-----| PCI Bridge |-----+
* +-----+-----+ +-------------+ | +------------+ | +-------------+
* | +----| End Point 2 |
* +-------------+ +-------------+ | +-------------+ | +-------------+
* | End Point 5 |----+ | End Point 0 |-------+ | End Point 3 |----+
* +-------------+ | +-------------+ | +-------------+ |
* | | |
* +-------------+ | +-------------+ | +-------------+ | +-------------+
* | End Point 6 |----+----| ISA Bridge |-------+-----| End Point 1 | +----| End Point 4 |
* +-------------+ +-------------+ | +-------------+ +-------------+
* |
* +------+ +----------------+ |
* | Port |---------| CardBus Bridge |----+
* +------+ +----------------+
*/
struct rt_pci_bus;
struct rt_pci_device_id
{
#define PCI_ANY_ID (~0)
#define RT_PCI_DEVICE_ID(vend, dev) \
.vendor = (vend), \
.device = (dev), \
.subsystem_vendor = PCI_ANY_ID, \
.subsystem_device = PCI_ANY_ID
#define RT_PCI_DEVICE_CLASS(dev_class, dev_class_mask) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
.subsystem_vendor = PCI_ANY_ID, \
.subsystem_device = PCI_ANY_ID, \
.class = (dev_class), .class_mask = (dev_class_mask),
rt_uint32_t vendor, device; /* Vendor and device ID or PCI_ANY_ID */
rt_uint32_t subsystem_vendor; /* Subsystem vendor ID or PCI_ANY_ID */
rt_uint32_t subsystem_device; /* Subsystem device ID or PCI_ANY_ID */
rt_uint32_t class, class_mask; /* (class, subclass, prog-if) triplet */
const void *data;
};
struct rt_pci_device
{
struct rt_device parent;
const char *name;
rt_list_t list;
struct rt_pci_bus *bus;
struct rt_pci_bus *subbus; /* Secondary bus behind a PCI-to-PCI bridge; NULL for an 'End Point' or 'Port' */
const struct rt_pci_device_id *id;
rt_uint32_t devfn; /* Encoded device & function index */
rt_uint16_t vendor;
rt_uint16_t device;
rt_uint16_t subsystem_vendor;
rt_uint16_t subsystem_device;
rt_uint32_t class; /* 3 bytes: (base, sub, prog-if) */
rt_uint8_t revision;
rt_uint8_t hdr_type;
rt_uint8_t max_latency;
rt_uint8_t min_grantl;
rt_uint8_t int_pin;
rt_uint8_t int_line;
rt_uint16_t exp_flags;
rt_uint32_t cfg_size;
void *sysdata;
int irq;
rt_uint8_t pin;
struct rt_pic *intx_pic;
struct rt_pci_bus_resource resource[RT_PCI_BAR_NR_MAX];
rt_uint8_t pme_cap;
rt_uint8_t msi_cap;
rt_uint8_t msix_cap;
rt_uint8_t pcie_cap;
rt_uint8_t busmaster:1; /* Is the bus master */
rt_uint8_t multi_function:1; /* Multi-function device */
rt_uint8_t ari_enabled:1; /* Alternative Routing-ID Interpretation */
rt_uint8_t no_msi:1; /* May not use MSI */
rt_uint8_t no_64bit_msi:1; /* May only use 32-bit MSIs */
rt_uint8_t msi_enabled:1; /* MSI enable */
rt_uint8_t msix_enabled:1; /* MSIx enable */
rt_uint8_t broken_intx_masking:1; /* INTx masking can't be used */
rt_uint8_t pme_support:5; /* Bitmask of states from which PME# can be generated */
#ifdef RT_PCI_MSI
void *msix_base;
struct rt_pic *msi_pic;
rt_list_t msi_desc_nodes;
struct rt_spinlock msi_lock;
#endif
};
struct rt_pci_host_bridge
{
struct rt_device parent;
rt_uint32_t domain;
struct rt_pci_bus *root_bus;
const struct rt_pci_ops *ops;
const struct rt_pci_ops *child_ops;
rt_uint32_t bus_range[2];
rt_size_t bus_regions_nr;
struct rt_pci_bus_region *bus_regions;
rt_size_t dma_regions_nr;
struct rt_pci_bus_region *dma_regions;
rt_uint8_t (*irq_slot)(struct rt_pci_device *pdev, rt_uint8_t *pinp);
int (*irq_map)(struct rt_pci_device *pdev, rt_uint8_t slot, rt_uint8_t pin);
void *sysdata;
rt_uint8_t priv[0];
};
#define rt_device_to_pci_host_bridge(dev) rt_container_of(dev, struct rt_pci_host_bridge, parent)
struct rt_pci_ops
{
rt_err_t (*add)(struct rt_pci_bus *bus);
rt_err_t (*remove)(struct rt_pci_bus *bus);
void *(*map)(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg);
rt_err_t (*read)(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t (*write)(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
};
struct rt_pci_bus
{
rt_list_t list;
rt_list_t children_nodes;
rt_list_t devices_nodes;
struct rt_pci_bus *parent;
union
{
/* For a PCI-to-PCI bridge (parent is not NULL): the bridge device that creates this bus */
struct rt_pci_device *self;
/* For a host bridge: this is the root bus ('PCI Bus 0') */
struct rt_pci_host_bridge *host_bridge;
};
const struct rt_pci_ops *ops;
char name[48];
char number;
struct rt_spinlock lock;
void *sysdata;
};
struct rt_pci_driver
{
struct rt_driver parent;
const char *name;
const struct rt_pci_device_id *ids;
rt_err_t (*probe)(struct rt_pci_device *pdev);
rt_err_t (*remove)(struct rt_pci_device *pdev);
rt_err_t (*shutdown)(struct rt_pci_device *pdev);
};
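/*
 * Driver skeleton sketch (illustrative; the "demo" names and the 0x1af4/0x1000
 * vendor/device pair are placeholders): an id table built with the
 * RT_PCI_DEVICE_ID() helper plus a probe callback is registered through
 * RT_PCI_DRIVER_EXPORT(), declared later in this header.
 *
 *     static const struct rt_pci_device_id demo_pci_ids[] =
 *     {
 *         { RT_PCI_DEVICE_ID(0x1af4, 0x1000) },
 *         { 0 },  // terminating entry
 *     };
 *
 *     static rt_err_t demo_pci_probe(struct rt_pci_device *pdev)
 *     {
 *         return RT_EOK;
 *     }
 *
 *     static struct rt_pci_driver demo_pci_driver =
 *     {
 *         .name  = "demo-pci",
 *         .ids   = demo_pci_ids,
 *         .probe = demo_pci_probe,
 *     };
 *     RT_PCI_DRIVER_EXPORT(demo_pci_driver);
 */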
struct rt_pci_msix_entry
{
int irq;
int index;
};
enum rt_pci_power
{
RT_PCI_D0,
RT_PCI_D1,
RT_PCI_D2,
RT_PCI_D3HOT,
RT_PCI_D3COLD,
RT_PCI_PME_MAX,
};
void rt_pci_pme_init(struct rt_pci_device *pdev);
void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable);
rt_err_t rt_pci_enable_wake(struct rt_pci_device *pci_dev,
enum rt_pci_power state, rt_bool_t enable);
rt_inline rt_bool_t rt_pci_pme_capable(struct rt_pci_device *pdev,
enum rt_pci_power state)
{
if (!pdev->pme_cap)
{
return RT_FALSE;
}
return !!(pdev->pme_support & (1 << state));
}
void rt_pci_msi_init(struct rt_pci_device *pdev);
void rt_pci_msix_init(struct rt_pci_device *pdev);
void rt_pci_set_master(struct rt_pci_device *pdev);
void rt_pci_clear_master(struct rt_pci_device *pdev);
struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size);
rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *);
rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge);
struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus);
struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn);
rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev);
rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn);
rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses);
rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus);
rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus);
rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev);
rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev);
rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap);
rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap);
rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap);
rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap);
rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap);
struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus);
struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus);
rt_inline rt_uint16_t rt_pci_dev_id(struct rt_pci_device *pdev)
{
return RT_PCI_DEVID(pdev->bus->number, pdev->devfn);
}
rt_inline rt_bool_t rt_pci_is_root_bus(struct rt_pci_bus *bus)
{
return bus->parent ? RT_FALSE : RT_TRUE;
}
rt_inline rt_bool_t rt_pci_is_bridge(struct rt_pci_device *pdev)
{
return pdev->hdr_type == PCIM_HDRTYPE_BRIDGE ||
pdev->hdr_type == PCIM_HDRTYPE_CARDBUS;
}
rt_inline rt_bool_t rt_pci_is_pcie(struct rt_pci_device *pdev)
{
return !!pdev->pcie_cap;
}
#define rt_pci_foreach_bridge(pdev, bus) \
rt_list_for_each_entry(pdev, &bus->devices_nodes, list) \
if (rt_pci_is_bridge(pdev))
rt_err_t rt_pci_bus_read_config_u8(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint8_t *value);
rt_err_t rt_pci_bus_read_config_u16(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint16_t *value);
rt_err_t rt_pci_bus_read_config_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_u8(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint8_t value);
rt_err_t rt_pci_bus_write_config_u16(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint16_t value);
rt_err_t rt_pci_bus_write_config_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint32_t value);
rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
rt_inline rt_err_t rt_pci_read_config_u8(const struct rt_pci_device *pdev,
int reg, rt_uint8_t *value)
{
return rt_pci_bus_read_config_u8(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_read_config_u16(const struct rt_pci_device *pdev,
int reg, rt_uint16_t *value)
{
return rt_pci_bus_read_config_u16(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_read_config_u32(const struct rt_pci_device *pdev,
int reg, rt_uint32_t *value)
{
return rt_pci_bus_read_config_u32(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u8(const struct rt_pci_device *pdev,
int reg, rt_uint8_t value)
{
return rt_pci_bus_write_config_u8(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u16(const struct rt_pci_device *pdev,
int reg, rt_uint16_t value)
{
return rt_pci_bus_write_config_u16(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u32(const struct rt_pci_device *pdev,
int reg, rt_uint32_t value)
{
return rt_pci_bus_write_config_u32(pdev->bus, pdev->devfn, reg, value);
}
#ifdef RT_USING_OFW
int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin);
rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus);
rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus);
rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev);
rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev);
#else
rt_inline rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin)
{
return -1;
}
rt_inline rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
return -RT_ENOSYS;
}
#endif /* RT_USING_OFW */
rt_inline void *rt_pci_iomap(struct rt_pci_device *pdev, int bar_idx)
{
struct rt_pci_bus_resource *res;
RT_ASSERT(bar_idx < RT_ARRAY_SIZE(pdev->resource));
res = &pdev->resource[bar_idx];
return rt_ioremap((void *)res->base, res->size);
}
rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin);
rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp);
void rt_pci_assign_irq(struct rt_pci_device *pdev);
void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable);
rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev);
rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev);
void rt_pci_irq_mask(struct rt_pci_device *pdev);
void rt_pci_irq_unmask(struct rt_pci_device *pdev);
#define RT_PCI_IRQ_F_LEGACY RT_BIT(0) /* Allow legacy interrupts */
#define RT_PCI_IRQ_F_MSI RT_BIT(1) /* Allow MSI interrupts */
#define RT_PCI_IRQ_F_MSIX RT_BIT(2) /* Allow MSI-X interrupts */
#define RT_PCI_IRQ_F_AFFINITY RT_BIT(3) /* Auto-assign affinity */
#define RT_PCI_IRQ_F_ALL_TYPES (RT_PCI_IRQ_F_LEGACY | RT_PCI_IRQ_F_MSI | RT_PCI_IRQ_F_MSIX)
#ifdef RT_PCI_MSI
rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)));
void rt_pci_free_vector(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev);
rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)));
rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev);
rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)));
#else
rt_inline rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
rt_inline void rt_pci_free_vector(struct rt_pci_device *pdev)
{
return;
}
rt_inline rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
return 0;
}
rt_inline rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
rt_inline rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
return 0;
}
rt_inline rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
#endif /* RT_PCI_MSI */
rt_inline void rt_pci_msix_entry_index_linear(struct rt_pci_msix_entry *entries,
rt_size_t nvectors)
{
for (int i = 0; i < nvectors; ++i)
{
entries[i].index = i;
}
}
rt_inline rt_ssize_t rt_pci_msi_enable_range(struct rt_pci_device *pdev,
int min, int max)
{
return rt_pci_msi_enable_range_affinity(pdev, min, max, RT_NULL);
}
rt_inline rt_err_t rt_pci_msi_enable(struct rt_pci_device *pdev)
{
rt_ssize_t res = rt_pci_msi_enable_range(pdev, 1, 1);
return res == 1 ? res : RT_EOK;
}
rt_inline rt_ssize_t rt_pci_msix_enable_range(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max)
{
return rt_pci_msix_enable_range_affinity(pdev, entries, min, max, RT_NULL);
}
rt_inline rt_ssize_t rt_pci_msix_enable(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int count)
{
return rt_pci_msix_enable_range(pdev, entries, count, count);
}
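/*
 * MSI-X allocation sketch (illustrative; error handling trimmed and the
 * vector count of 4 is arbitrary): the caller numbers its entry array
 * linearly, enables MSI-X, then reads back the IRQ mapped to each entry.
 *
 *     struct rt_pci_msix_entry entries[4];
 *     rt_pci_msix_entry_index_linear(entries, RT_ARRAY_SIZE(entries));
 *     if (rt_pci_msix_enable(pdev, entries, RT_ARRAY_SIZE(entries)) > 0)
 *     {
 *         // entries[i].irq now holds the interrupt number of vector i
 *     }
 */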
rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge);
struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64);
rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
struct rt_pci_device *pdev);
void rt_pci_enum_device(struct rt_pci_bus *bus,
rt_bool_t (callback(struct rt_pci_device *, void *)), void *data);
const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
const struct rt_pci_device_id *id);
const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
const struct rt_pci_device_id *ids);
rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv);
rt_err_t rt_pci_device_register(struct rt_pci_device *pdev);
struct rt_pci_bus_resource *rt_pci_find_bar(struct rt_pci_device *pdev, rt_ubase_t flags, int index);
#define RT_PCI_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, pci, BUILIN)
extern struct rt_spinlock rt_pci_lock;
#endif /* __PCI_H__ */

View File

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_ENDPOINT_H__
#define __PCI_ENDPOINT_H__
#include <drivers/pci.h>
enum rt_pci_ep_pin
{
RT_PCI_EP_PIN_UNKNOWN,
RT_PCI_EP_PIN_INTA,
RT_PCI_EP_PIN_INTB,
RT_PCI_EP_PIN_INTC,
RT_PCI_EP_PIN_INTD,
};
enum rt_pci_ep_irq
{
RT_PCI_EP_IRQ_UNKNOWN,
RT_PCI_EP_IRQ_LEGACY,
RT_PCI_EP_IRQ_MSI,
RT_PCI_EP_IRQ_MSIX,
};
struct rt_pci_ep_header
{
rt_uint16_t vendor;
rt_uint16_t device;
rt_uint8_t revision;
rt_uint8_t progif;
rt_uint8_t subclass;
rt_uint8_t class_code;
rt_uint8_t cache_line_size;
rt_uint16_t subsystem_vendor;
rt_uint16_t subsystem_device;
enum rt_pci_ep_pin intx;
};
struct rt_pci_ep_bar
{
/* To PCI Bus */
struct rt_pci_bus_resource bus;
/* To CPU */
rt_ubase_t cpu_addr;
};
/*
 * One entry of the MSI-X table. For the full format, see
 * `components/drivers/include/drivers/pci_msi.h`.
 */
struct rt_pci_ep_msix_tbl
{
union
{
rt_uint64_t msg_addr;
struct
{
rt_uint32_t msg_addr_upper;
rt_uint32_t msg_addr_lower;
};
};
rt_uint32_t msg_data;
rt_uint32_t vector_ctrl;
};
struct rt_pci_ep_ops;
struct rt_pci_ep_mem;
struct rt_pci_ep
{
rt_list_t list;
const char *name;
struct rt_ref ref;
const struct rt_device *rc_dev;
const struct rt_pci_ep_ops *ops;
rt_size_t mems_nr;
struct rt_pci_ep_mem *mems;
rt_uint8_t max_functions;
RT_BITMAP_DECLARE(functions_map, 8);
rt_list_t epf_nodes;
struct rt_mutex lock;
void *priv;
};
struct rt_pci_ep_mem
{
rt_ubase_t cpu_addr;
rt_size_t size;
rt_size_t page_size;
rt_bitmap_t *map;
rt_size_t bits;
};
struct rt_pci_epf
{
rt_list_t list;
const char *name;
struct rt_pci_ep_header *header;
struct rt_pci_ep_bar bar[PCI_STD_NUM_BARS];
rt_uint8_t msi_interrupts;
rt_uint16_t msix_interrupts;
rt_uint8_t func_no;
struct rt_pci_ep *ep;
};
struct rt_pci_ep_ops
{
rt_err_t (*write_header)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr);
rt_err_t (*set_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t (*clear_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t (*map_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
rt_err_t (*unmap_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no, rt_ubase_t addr);
rt_err_t (*set_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr);
rt_err_t (*get_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t (*set_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset);
rt_err_t (*get_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t (*raise_irq)(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq);
rt_err_t (*start)(struct rt_pci_ep *ep);
rt_err_t (*stop)(struct rt_pci_ep *ep);
};
rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr);
rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr);
rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr);
rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset);
rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq);
rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_mem_array_init(struct rt_pci_ep *ep,
struct rt_pci_ep_mem *mems, rt_size_t mems_nr);
rt_err_t rt_pci_ep_mem_init(struct rt_pci_ep *ep,
rt_ubase_t cpu_addr, rt_size_t size, rt_size_t page_size);
void *rt_pci_ep_mem_alloc(struct rt_pci_ep *ep,
rt_ubase_t *out_cpu_addr, rt_size_t size);
void rt_pci_ep_mem_free(struct rt_pci_ep *ep,
void *vaddr, rt_ubase_t cpu_addr, rt_size_t size);
rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
struct rt_pci_ep *rt_pci_ep_get(const char *name);
void rt_pci_ep_put(struct rt_pci_ep *ep);
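/*
 * Endpoint-function sketch (illustrative; the endpoint name, vendor/device
 * values, BAR size and IRQ number are hypothetical, and NULL/error checks
 * plus BAR memory allocation are omitted): an EPF looks up a registered
 * endpoint controller, programs its config-space header, exposes one BAR and
 * raises a legacy interrupt towards the host.
 *
 *     struct rt_pci_ep *ep = rt_pci_ep_get("demo-ep");
 *     struct rt_pci_ep_header hdr = { .vendor = 0x1af4, .device = 0x1000 };
 *     struct rt_pci_ep_bar bar = { 0 };
 *
 *     rt_pci_ep_write_header(ep, 0, &hdr);
 *     bar.bus.size = 0x1000;
 *     rt_pci_ep_set_bar(ep, 0, &bar, 0);
 *     rt_pci_ep_raise_irq(ep, 0, RT_PCI_EP_IRQ_LEGACY, 1);
 *     rt_pci_ep_put(ep);
 */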
#endif /* __PCI_ENDPOINT_H__ */

View File

@@ -0,0 +1,189 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_MSI_H__
#define __PCI_MSI_H__
#include <drivers/pci.h>
/*
* MSI Format:
* T0: 32-bit Address
* T1: 64-bit Address
* T2: 32-bit Address with Per-Vector Masking
* T3: 64-bit Address with Per-Vector Masking
*
* 31 16 15 8 7 0
* +---------------------------+-----------------+---------------+
* | Message Control | Next Capability | Capability ID | DW0
* | | Pointer | (05h) |
* +---------------------------+-----------------+---------------+
* | Message Address [31:0] | DW1
* +-------------------------------------------------------------+
* | Message Address [63:32] | DW2 (T1: only 64-bit)
* +---------------------------+---------------------------------+
* | Reserved | Message Data | DW3
* +---------------------------+---------------------------------+
* | Mask Bits | DW4 (T2/T3: only with Per-Vector Masking)
* +-------------------------------------------------------------+
* | Pending Bits | DW5 (T2/T3: only with Per-Vector Masking)
* +-------------------------------------------------------------+
*
* MSI Message Control:
*
* 15 9 8 7 6 4 3 1 0
* +----------------------+---+---+---------------+----------+---+
* | Reserved | | | | | |
* +----------------------+---+---+---------------+----------+---+
* ^ ^ ^ ^ ^
* | | | | |
* | | | | +---- MSI Enable (RW)
* | | | +----------- Multiple Message Capable (RO, log2n, [n <= 5])
* | | +------------------------- Multiple Message Enable (RW, log2n, [n <= 5])
* | +----------------------------------- 64-bit Address Capable
* +--------------------------------------- Per-Vector Masking Capable
*/
struct rt_pci_msi_conf
{
rt_uint32_t mask;
rt_uint8_t mask_pos;
int default_irq;
struct
{
rt_uint8_t is_masking:1;
rt_uint8_t is_64bit:1;
rt_uint8_t multi_msg_max:3; /* log2 num of messages allocated */
rt_uint8_t multi_msg_use:3; /* log2 num of messages supported */
} cap;
};
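/*
 * The multi_msg_* fields hold log2 values, mirroring the Multiple Message
 * bits of the capability: a value of 3 corresponds to 1 << 3 == 8 vectors.
 */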
/*
* MSI-X Format:
*
* 31 16 15 8 7 0
* +---------------------------+-----------------+---------------+
* | Message Control | Next Capability | Capability ID | DW0
* | | Pointer | (11h) |
* +---------------------------+-----------------+---+-----------+
* | MSI-X Table Offset | Table BIR | DW1 (BIR: BAR Index Register)
* +-------------------------------------------------+-----------+ |
* | Pending Bit Array (PBA) Offset | PBA BIR | DW2 --------+ |
* +-------------------------------------------------+-----------+ | |
* | |
* MSI-X Message Control: | |
* | |
* 15 14 13 11 10 0 | |
* +---+---+----------+------------------------------------------+ | |
* | | | Reserved | Table Size in N-1 (RO) | | |
* +---+---+----------+------------------------------------------+ | |
* ^ ^ | |
* | | | |
* | +---- Function Mask (RW) | |
* +-------- MSI-X Enable (RW) | |
* | |
* MSI-X Table (BAR[Table BIR] + MSI-X Table Offset): | |
* | |
* DW3 DW2 DW1 DW0 | |
* +----------------+--------------+---------------+---------------+ <---------|-+
* | Vector Control | Message Data | Upper Address | Lower Address | Entry 0 |
* +----------------+--------------+---------------+---------------+ |
* | Vector Control | Message Data | Upper Address | Lower Address | Entry 1 |
* +----------------+--------------+---------------+---------------+ |
* | ...... | ...... | ...... | ...... | |
* +----------------+--------------+---------------+---------------+ |
* | Vector Control | Message Data | Upper Address | Lower Address | Entry N-1 |
* +----------------+--------------+---------------+---------------+ |
* ^ |
* | |
* +---- Bit 0 is vector Mask Bit (R/W) |
* |
* MSI-X Pending Bit Array (BAR[PBA BIR] + Pending Bit Array Offset): |
* |
* DW1 DW0 |
* +-------------------------------+ <-----------------------------------------+
* | Pending Bits 0 - 63 | QW 0
* +-------------------------------+
* | Pending Bits 64 - 127 | QW 1
* +-------------------------------+
* | ...... |
* +-------------------------------+
* | Pending Bits | QW (N-1)/64
* +-------------------------------+
*/
struct rt_pci_msix_conf
{
int index;
rt_uint32_t msg_ctrl;
void *table_base;
};
struct rt_pci_msi_msg
{
rt_uint32_t address_lo;
rt_uint32_t address_hi;
rt_uint32_t data;
};
struct rt_pci_msi_desc
{
rt_list_t list;
int irq;
rt_size_t vector_used;
rt_size_t vector_count;
union
{
/* For MSI-X */
rt_bitmap_t *affinity;
/* For MSI */
rt_bitmap_t **affinities;
};
struct rt_pci_device *pdev;
struct rt_pci_msi_msg msg;
void *write_msi_msg_data;
void (*write_msi_msg)(struct rt_pci_msi_desc *, void *);
rt_bool_t is_msix;
union
{
struct rt_pci_msi_conf msi;
struct rt_pci_msix_conf msix;
};
void *priv;
};
#define rt_pci_msi_first_desc(pdev) \
(rt_list_isempty(&(pdev)->msi_desc_nodes) ? RT_NULL : \
rt_list_first_entry(&(pdev)->msi_desc_nodes, struct rt_pci_msi_desc, list))
#define rt_pci_msi_for_each_desc(pdev, desc) \
rt_list_for_each_entry(desc, &(pdev)->msi_desc_nodes, list)
#define rt_pci_msix_table_size(flags) (((flags) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type);
void rt_pci_msi_shutdown(struct rt_pci_device *pdev);
void rt_pci_msix_shutdown(struct rt_pci_device *pdev);
void rt_pci_msi_free_irqs(struct rt_pci_device *pdev);
void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg);
void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq);
void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq);
#endif /* __PCI_MSI_H__ */
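
For orientation, a minimal sketch of how the descriptor list and helper macros above are typically used; the device is assumed to have had its vectors allocated already, and the function name is illustrative:

static void pci_dump_msi_vectors(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc;

    /* Walk every MSI/MSI-X descriptor attached to the device */
    rt_pci_msi_for_each_desc(pdev, desc)
    {
        rt_kprintf("%s irq=%d address=%08x%08x data=%08x\n",
                   desc->is_msix ? "MSI-X" : "MSI", desc->irq,
                   desc->msg.address_hi, desc->msg.address_lo,
                   desc->msg.data);
    }
}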

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -7,18 +7,172 @@
* Date Author Notes
* 2020-10-14 wangqiang the first version
* 2022-08-17 xjy198903 add 1000M definition
* 2024-10-08 zhujiale add phy v2.0
*/
#ifndef __PHY_H__
#define __PHY_H__
#ifndef __NET_PHY_H__
#define __NET_PHY_H__
#include <rtthread.h>
#include <drivers/core/driver.h>
#ifdef RT_USING_PHY_V2
#include <ofw.h>
#include <mdio.h>
#include <general_phy.h>
#define RT_PHY_FIXED_ID 0xa5a55a5a
#define RT_PHY_NCSI_ID 0xbeefcafe
/* Indicates what features are supported by the interface. */
#define RT_SUPPORTED_10baseT_Half (1 << 0)
#define RT_SUPPORTED_10baseT_Full (1 << 1)
#define RT_SUPPORTED_100baseT_Half (1 << 2)
#define RT_SUPPORTED_100baseT_Full (1 << 3)
#define RT_SUPPORTED_1000baseT_Half (1 << 4)
#define RT_SUPPORTED_1000baseT_Full (1 << 5)
#define RT_SUPPORTED_Autoneg (1 << 6)
#define RT_SUPPORTED_TP (1 << 7)
#define RT_SUPPORTED_AUI (1 << 8)
#define RT_SUPPORTED_MII (1 << 9)
#define RT_SUPPORTED_FIBRE (1 << 10)
#define RT_SUPPORTED_BNC (1 << 11)
#define RT_SUPPORTED_10000baseT_Full (1 << 12)
#define RT_SUPPORTED_Pause (1 << 13)
#define RT_SUPPORTED_Asym_Pause (1 << 14)
#define RT_SUPPORTED_2500baseX_Full (1 << 15)
#define RT_SUPPORTED_Backplane (1 << 16)
#define RT_SUPPORTED_1000baseKX_Full (1 << 17)
#define RT_SUPPORTED_10000baseKX4_Full (1 << 18)
#define RT_SUPPORTED_10000baseKR_Full (1 << 19)
#define RT_SUPPORTED_10000baseR_FEC (1 << 20)
#define RT_SUPPORTED_1000baseX_Half (1 << 21)
#define RT_SUPPORTED_1000baseX_Full (1 << 22)
#define RT_PHY_FLAG_BROKEN_RESET (1 << 0) /* soft reset not supported */
#define RT_PHY_DEFAULT_FEATURES (RT_SUPPORTED_Autoneg | RT_SUPPORTED_TP | RT_SUPPORTED_MII)
#define RT_PHY_10BT_FEATURES (RT_SUPPORTED_10baseT_Half | RT_SUPPORTED_10baseT_Full)
#define RT_PHY_100BT_FEATURES (RT_SUPPORTED_100baseT_Half | RT_SUPPORTED_100baseT_Full)
#define RT_PHY_1000BT_FEATURES (RT_SUPPORTED_1000baseT_Half | RT_SUPPORTED_1000baseT_Full)
#define RT_PHY_BASIC_FEATURES (RT_PHY_10BT_FEATURES | RT_PHY_100BT_FEATURES | RT_PHY_DEFAULT_FEATURES)
#define RT_PHY_GBIT_FEATURES (RT_PHY_BASIC_FEATURES | RT_PHY_1000BT_FEATURES)
#define RT_PHY_10G_FEATURES (RT_PHY_GBIT_FEATURES | RT_SUPPORTED_10000baseT_Full)
struct rt_phy_device
{
struct rt_device parent;
struct mii_bus *bus;
struct rt_phy_driver *drv;
rt_uint32_t phy_id;
rt_uint32_t mmds;
int speed;
int duplex;
int link;
int port;
rt_uint32_t advertising;
rt_uint32_t supported;
rt_bool_t autoneg;
int pause;
rt_ubase_t addr;
rt_bool_t is_c45;
rt_uint32_t flags;
rt_phy_interface interface;
#ifdef RT_USING_OFW
struct rt_ofw_node *node;
#endif
void *priv;
};
struct rt_phy_driver
{
struct rt_driver parent;
char name[RT_NAME_MAX];
rt_uint64_t uid;
rt_uint64_t mask;
rt_uint64_t mmds;
rt_uint32_t features;
int (*probe)(struct rt_phy_device *phydev);
int (*config)(struct rt_phy_device *phydev);
int (*startup)(struct rt_phy_device *phydev);
int (*shutdown)(struct rt_phy_device *phydev);
int (*read)(struct rt_phy_device *phydev, int addr, int devad, int reg);
int (*write)(struct rt_phy_device *phydev, int addr, int devad, int reg,
rt_uint16_t val);
int (*read_mmd)(struct rt_phy_device *phydev, int devad, int reg);
int (*write_mmd)(struct rt_phy_device *phydev, int devad, int reg,
rt_uint16_t val);
/* driver private data */
void *data;
};
int rt_phy_read(struct rt_phy_device *phydev, int devad, int regnum);
int rt_phy_write(struct rt_phy_device *phydev, int devad, int regnum, rt_uint16_t val);
int rt_phy_read_mmd(struct rt_phy_device *phydev, int devad, int regnum);
int rt_phy_write_mmd(struct rt_phy_device *phydev, int devad, int regnum, rt_uint16_t val);
int rt_phy_reset(struct rt_phy_device *phydev);
int rt_phy_startup(struct rt_phy_device *phydev);
int rt_phy_config(struct rt_phy_device *phydev);
int rt_phy_shutdown(struct rt_phy_device *phydev);
int rt_phy_set_supported(struct rt_phy_device *phydev, rt_uint32_t max_speed);
void rt_phy_mmd_start_indirect(struct rt_phy_device *phydev, int devad, int regnum);
rt_err_t rt_phy_device_register(struct rt_phy_device *pdev);
rt_err_t rt_phy_driver_register(struct rt_phy_driver *pdrv);
rt_err_t rt_ofw_get_phyid(struct rt_ofw_node *np, rt_uint32_t *id);
struct rt_phy_device *rt_phy_device_create(struct mii_bus *bus, int addr, rt_uint32_t phy_id, rt_bool_t is_c45);
struct rt_phy_device *rt_phy_find_by_mask(struct mii_bus *bus, unsigned int phy_mask);
struct rt_phy_device *rt_ofw_create_phy(struct mii_bus *bus, struct rt_ofw_node *np, int phyaddr);
struct rt_phy_device *rt_phy_get_device(struct mii_bus *bus, struct rt_ofw_node *np, int addr, rt_phy_interface interface);
#define RT_PHY_DEVICE_REGISTER(phy_dev) \
static int rt_##phy_dev##_register(void) \
{ \
rt_phy_device_register(&phy_dev); \
return 0; \
} \
INIT_PREV_EXPORT(rt_##phy_dev##_register);
#define RT_PHY_DRIVER_REGISTER(phy_drv) \
static int rt_##phy_drv##_register(void) \
{ \
rt_phy_driver_register(&phy_drv); \
return 0; \
} \
INIT_PREV_EXPORT(rt_##phy_drv##_register);
#endif
#ifdef RT_USING_PHY
#ifdef __cplusplus
extern "C"
{
#endif
struct rt_mdio_bus_ops
{
rt_bool_t (*init)(void *bus, rt_uint32_t src_clock_hz);
rt_size_t (*read)(void *bus, rt_uint32_t addr, rt_uint32_t reg, void *data, rt_uint32_t size);
rt_size_t (*write)(void *bus, rt_uint32_t addr, rt_uint32_t reg, void *data, rt_uint32_t size);
rt_bool_t (*uninit)(void *bus);
};
struct rt_mdio_bus
{
void *hw_obj;
char *name;
struct rt_mdio_bus_ops *ops;
};
typedef struct rt_mdio_bus rt_mdio_t;
/* Defines the PHY link speed. This is aligned with the MAC speed. */
#define PHY_SPEED_10M 0U /* PHY 10M speed. */
#define PHY_SPEED_100M 1U /* PHY 100M speed. */
@@ -67,5 +221,5 @@ rt_err_t rt_hw_phy_register(struct rt_phy_device *phy, const char *name);
#ifdef __cplusplus
}
#endif
#endif /* __PHY_H__*/
#endif
#endif
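
A skeleton showing how a driver plugs into the PHY v2.0 API above; the PHY ID, mask, and callback body are placeholders, not a real device:

static int demo_phy_config(struct rt_phy_device *phydev)
{
    /* Restrict and advertise gigabit features only */
    phydev->supported &= RT_PHY_GBIT_FEATURES;
    phydev->advertising = phydev->supported;
    return 0;
}

static struct rt_phy_driver demo_phy_driver =
{
    .name = "demo-gphy",
    .uid = 0x001cc800,          /* example PHY ID, assumed */
    .mask = 0xffffff00,
    .features = RT_PHY_GBIT_FEATURES,
    .config = demo_phy_config,
};
RT_PHY_DRIVER_REGISTER(demo_phy_driver);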

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#ifndef __PHYE_H__
#define __PHYE_H__
#include <rtthread.h>
#include <drivers/ofw.h>
enum rt_phye_mode
{
RT_PHYE_MODE_INVALID,
RT_PHYE_MODE_USB_HOST,
RT_PHYE_MODE_USB_HOST_LS,
RT_PHYE_MODE_USB_HOST_FS,
RT_PHYE_MODE_USB_HOST_HS,
RT_PHYE_MODE_USB_HOST_SS,
RT_PHYE_MODE_USB_DEVICE,
RT_PHYE_MODE_USB_DEVICE_LS,
RT_PHYE_MODE_USB_DEVICE_FS,
RT_PHYE_MODE_USB_DEVICE_HS,
RT_PHYE_MODE_USB_DEVICE_SS,
RT_PHYE_MODE_USB_OTG,
RT_PHYE_MODE_UFS_HS_A,
RT_PHYE_MODE_UFS_HS_B,
RT_PHYE_MODE_PCIE,
RT_PHYE_MODE_ETHERNET,
RT_PHYE_MODE_MIPI_DPHY,
RT_PHYE_MODE_SATA,
RT_PHYE_MODE_LVDS,
RT_PHYE_MODE_DP,
RT_PHYE_MODE_MAX,
/* PCIe */
RT_PHYE_MODE_PCIE_RC = RT_PHYE_MODE_MAX,
RT_PHYE_MODE_PCIE_EP,
RT_PHYE_MODE_PCIE_BIFURCATION,
};
struct rt_phye_ops;
struct rt_phye
{
struct rt_device *dev;
const struct rt_phye_ops *ops;
int init_count;
int power_count;
struct rt_spinlock lock;
};
struct rt_phye_ops
{
rt_err_t (*init)(struct rt_phye *phye);
rt_err_t (*exit)(struct rt_phye *phye);
rt_err_t (*reset)(struct rt_phye *phye);
rt_err_t (*power_on)(struct rt_phye *phye);
rt_err_t (*power_off)(struct rt_phye *phye);
rt_err_t (*set_mode)(struct rt_phye *phye, enum rt_phye_mode mode, int submode);
rt_err_t (*ofw_parse)(struct rt_phye *phye, struct rt_ofw_cell_args *phye_args);
};
rt_err_t rt_phye_register(struct rt_phye *phye);
rt_err_t rt_phye_unregister(struct rt_phye *phye);
rt_err_t rt_phye_init(struct rt_phye *phye);
rt_err_t rt_phye_exit(struct rt_phye *phye);
rt_err_t rt_phye_reset(struct rt_phye *phye);
rt_err_t rt_phye_power_on(struct rt_phye *phye);
rt_err_t rt_phye_power_off(struct rt_phye *phye);
rt_err_t rt_phye_set_mode(struct rt_phye *phye, enum rt_phye_mode mode, int submode);
rt_inline rt_err_t rt_phye_set_mode_simple(struct rt_phye *phye, enum rt_phye_mode mode)
{
return rt_phye_set_mode(phye, mode, RT_PHYE_MODE_INVALID);
}
struct rt_phye *rt_phye_get_by_index(struct rt_device *dev, int index);
struct rt_phye *rt_phye_get_by_name(struct rt_device *dev, const char *id);
void rt_phye_put(struct rt_phye *phye);
#endif /* __PHYE_H__ */
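
Sketch of the consumer side of this generic-PHY API, assuming a controller whose firmware node references a PHY named "usb2-phy"; error handling is simplified:

static rt_err_t usb_host_bring_up_phy(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_phye *phye = rt_phye_get_by_name(dev, "usb2-phy");

    if (!phye)
    {
        return -RT_EIO;
    }

    /* init -> set mode -> power on, stopping at the first failure */
    if ((err = rt_phye_init(phye)) ||
        (err = rt_phye_set_mode_simple(phye, RT_PHYE_MODE_USB_HOST)) ||
        (err = rt_phye_power_on(phye)))
    {
        rt_phye_put(phye);
    }

    return err;
}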

View File

@@ -148,6 +148,7 @@ void rt_pic_default_name(struct rt_pic *pic);
struct rt_pic *rt_pic_dynamic_cast(void *ptr);
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr);
rt_err_t rt_pic_cancel_irq(struct rt_pic *pic);
int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq);
int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq);

View File

@@ -12,10 +12,7 @@
#ifndef __PLATFORM_H__
#define __PLATFORM_H__
#ifdef RT_USING_OFW
#include <drivers/ofw.h>
#endif
#include <drivers/core/driver.h>
struct rt_platform_device
@@ -25,10 +22,7 @@ struct rt_platform_device
int dev_id;
const char *name;
#ifdef RT_USING_OFW
const struct rt_ofw_node_id *id;
#endif
void *priv;
};
@@ -38,10 +32,7 @@ struct rt_platform_driver
struct rt_driver parent;
const char *name;
#ifdef RT_USING_OFW
const struct rt_ofw_node_id *ids;
#endif
rt_err_t (*probe)(struct rt_platform_device *pdev);
rt_err_t (*remove)(struct rt_platform_device *pdev);
@@ -54,6 +45,7 @@ rt_err_t rt_platform_driver_register(struct rt_platform_driver *pdrv);
rt_err_t rt_platform_device_register(struct rt_platform_device *pdev);
rt_err_t rt_platform_ofw_device_probe_child(struct rt_ofw_node *np);
rt_err_t rt_platform_ofw_request(struct rt_ofw_node *np);
rt_err_t rt_platform_ofw_free(struct rt_platform_device *pdev);
#define RT_PLATFORM_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, platform, BUILIN)
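
A skeleton platform driver using the OFW id table that is now unconditionally present; the compatible string is made up, and the rt_ofw_node_id field names are assumed from the usual RT-Thread layout:

static rt_err_t demo_probe(struct rt_platform_device *pdev)
{
    rt_kprintf("probed %s\n", pdev->name);
    return RT_EOK;
}

static const struct rt_ofw_node_id demo_ofw_ids[] =
{
    { .compatible = "vendor,demo" },    /* assumed field name */
    { /* sentinel */ }
};

static struct rt_platform_driver demo_driver =
{
    .name = "demo",
    .ids = demo_ofw_ids,
    .probe = demo_probe,
};
RT_PLATFORM_DRIVER_EXPORT(demo_driver);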

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -10,6 +10,7 @@
* 2019-04-28 Zero-Free improve PM mode and device ops interface
* 2020-11-23 zhangsz update pm mode select
* 2020-11-27 zhangsz update pm 2.0
* 2024-07-04 wdfk-prog The device is registered and uninstalled by linked list
*/
#ifndef __PM_H__
@@ -134,15 +135,16 @@ struct rt_pm_ops
struct rt_device_pm_ops
{
int (*suspend)(const struct rt_device *device, rt_uint8_t mode);
rt_err_t (*suspend)(const struct rt_device *device, rt_uint8_t mode);
void (*resume)(const struct rt_device *device, rt_uint8_t mode);
int (*frequency_change)(const struct rt_device *device, rt_uint8_t mode);
rt_err_t (*frequency_change)(const struct rt_device *device, rt_uint8_t mode);
};
struct rt_device_pm
{
const struct rt_device *device;
const struct rt_device_pm_ops *ops;
rt_slist_t list;
};
struct rt_pm_module
@@ -172,7 +174,7 @@ struct rt_pm
rt_uint32_t sleep_status[PM_SLEEP_MODE_MAX - 1][(PM_MODULE_MAX_ID + 31) / 32];
/* the list of device, which has PM feature */
rt_uint8_t device_pm_number;
rt_slist_t device_list;
struct rt_device_pm *device_pm;
/* if the mode has timer, the corresponding bit is 1*/
@@ -194,10 +196,10 @@ struct rt_pm_notify
void *data;
};
void rt_pm_request(rt_uint8_t sleep_mode);
void rt_pm_release(rt_uint8_t sleep_mode);
void rt_pm_release_all(rt_uint8_t sleep_mode);
int rt_pm_run_enter(rt_uint8_t run_mode);
rt_err_t rt_pm_request(rt_uint8_t sleep_mode);
rt_err_t rt_pm_release(rt_uint8_t sleep_mode);
rt_err_t rt_pm_release_all(rt_uint8_t sleep_mode);
rt_err_t rt_pm_run_enter(rt_uint8_t run_mode);
void rt_pm_device_register(struct rt_device *device, const struct rt_device_pm_ops *ops);
void rt_pm_device_unregister(struct rt_device *device);
@@ -208,22 +210,22 @@ void rt_pm_default_set(rt_uint8_t sleep_mode);
void rt_system_pm_init(const struct rt_pm_ops *ops,
rt_uint8_t timer_mask,
void *user_data);
void rt_pm_module_request(uint8_t module_id, rt_uint8_t sleep_mode);
void rt_pm_module_release(uint8_t module_id, rt_uint8_t sleep_mode);
void rt_pm_module_release_all(uint8_t module_id, rt_uint8_t sleep_mode);
rt_err_t rt_pm_module_request(uint8_t module_id, rt_uint8_t sleep_mode);
rt_err_t rt_pm_module_release(uint8_t module_id, rt_uint8_t sleep_mode);
rt_err_t rt_pm_module_release_all(uint8_t module_id, rt_uint8_t sleep_mode);
void rt_pm_module_delay_sleep(rt_uint8_t module_id, rt_tick_t timeout);
rt_uint32_t rt_pm_module_get_status(void);
rt_uint8_t rt_pm_get_sleep_mode(void);
struct rt_pm *rt_pm_get_handle(void);
/* sleep : request or release */
void rt_pm_sleep_request(rt_uint16_t module_id, rt_uint8_t mode);
void rt_pm_sleep_release(rt_uint16_t module_id, rt_uint8_t mode);
void rt_pm_sleep_none_request(rt_uint16_t module_id);
void rt_pm_sleep_none_release(rt_uint16_t module_id);
void rt_pm_sleep_idle_request(rt_uint16_t module_id);
void rt_pm_sleep_idle_release(rt_uint16_t module_id);
void rt_pm_sleep_light_request(rt_uint16_t module_id);
void rt_pm_sleep_light_release(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_request(rt_uint16_t module_id, rt_uint8_t mode);
rt_err_t rt_pm_sleep_release(rt_uint16_t module_id, rt_uint8_t mode);
rt_err_t rt_pm_sleep_none_request(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_none_release(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_idle_request(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_idle_release(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_light_request(rt_uint16_t module_id);
rt_err_t rt_pm_sleep_light_release(rt_uint16_t module_id);
#endif /* __PM_H__ */
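
Sketch of a driver adopting the new rt_err_t-based PM callbacks; the "demo" names and the commented registration call are placeholders:

static rt_err_t demo_suspend(const struct rt_device *device, rt_uint8_t mode)
{
    /* save context, gate clocks, ... */
    return RT_EOK;
}

static void demo_resume(const struct rt_device *device, rt_uint8_t mode)
{
    /* restore context */
}

static const struct rt_device_pm_ops demo_pm_ops =
{
    .suspend = demo_suspend,
    .resume  = demo_resume,
};

/* in the driver's init path, with demo_dev being the driver's rt_device:
 * rt_pm_device_register(&demo_dev, &demo_pm_ops);
 */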

View File

@@ -0,0 +1,153 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-23 GuEe-GUI first version
*/
#ifndef __REGULATOR_H__
#define __REGULATOR_H__
#include <ref.h>
#include <rthw.h>
#include <rtthread.h>
#include <drivers/misc.h>
#define RT_REGULATOR_UVOLT_INVALID (((int)(RT_UINT32_MAX >> 1)))
struct rt_regulator_param
{
const char *name;
int min_uvolt; /* In uV */
int max_uvolt; /* In uV */
int min_uamp; /* In uA */
int max_uamp; /* In uA */
int ramp_delay; /* In uV/usec */
int enable_delay; /* In usec */
int off_on_delay; /* In usec */
rt_uint32_t enable_active_high:1;
rt_uint32_t boot_on:1; /* Is enabled on boot */
rt_uint32_t always_on:1; /* Must be enabled */
rt_uint32_t soft_start:1; /* Ramp voltage slowly */
rt_uint32_t pull_down:1; /* Pull down resistor when regulator off */
rt_uint32_t over_current_protection:1; /* Auto disable on over current */
};
struct rt_regulator_ops;
struct rt_regulator_node
{
rt_list_t list;
rt_list_t children_nodes;
struct rt_device *dev;
struct rt_regulator_node *parent;
const char *supply_name;
const struct rt_regulator_ops *ops;
struct rt_ref ref;
rt_atomic_t enabled_count;
const struct rt_regulator_param *param;
rt_list_t notifier_nodes;
void *priv;
};
/*
 * NOTE: Power regulator control is dangerous work. We don't want non-internal
 * consumers to access the power regulator tree outside of the regulator API,
 * so the `rt_regulator` definition is kept in the core instead of here.
*/
struct rt_regulator;
#define RT_REGULATOR_MODE_INVALID 0
#define RT_REGULATOR_MODE_FAST RT_BIT(0)
#define RT_REGULATOR_MODE_NORMAL RT_BIT(1)
#define RT_REGULATOR_MODE_IDLE RT_BIT(2)
#define RT_REGULATOR_MODE_STANDBY RT_BIT(3)
struct rt_regulator_ops
{
rt_err_t (*enable)(struct rt_regulator_node *reg);
rt_err_t (*disable)(struct rt_regulator_node *reg);
rt_bool_t (*is_enabled)(struct rt_regulator_node *reg);
rt_err_t (*set_voltage)(struct rt_regulator_node *reg, int min_uvolt, int max_uvolt);
int (*get_voltage)(struct rt_regulator_node *reg);
rt_err_t (*set_mode)(struct rt_regulator_node *reg, rt_uint32_t mode);
rt_int32_t (*get_mode)(struct rt_regulator_node *reg);
rt_err_t (*set_ramp_delay)(struct rt_regulator_node *reg, int ramp);
rt_uint32_t (*enable_time)(struct rt_regulator_node *reg);
};
struct rt_regulator_notifier;
#define RT_REGULATOR_MSG_ENABLE RT_BIT(0)
#define RT_REGULATOR_MSG_DISABLE RT_BIT(1)
#define RT_REGULATOR_MSG_VOLTAGE_CHANGE RT_BIT(2)
#define RT_REGULATOR_MSG_VOLTAGE_CHANGE_ERR RT_BIT(3)
union rt_regulator_notifier_args
{
struct
{
int old_uvolt;
int min_uvolt;
int max_uvolt;
};
};
typedef rt_err_t (*rt_regulator_notifier_callback)(struct rt_regulator_notifier *notifier,
rt_ubase_t msg, void *data);
struct rt_regulator_notifier
{
rt_list_t list;
struct rt_regulator *regulator;
rt_regulator_notifier_callback callback;
void *priv;
};
rt_err_t rt_regulator_register(struct rt_regulator_node *reg_np);
rt_err_t rt_regulator_unregister(struct rt_regulator_node *reg_np);
rt_err_t rt_regulator_notifier_register(struct rt_regulator *reg,
struct rt_regulator_notifier *notifier);
rt_err_t rt_regulator_notifier_unregister(struct rt_regulator *reg,
struct rt_regulator_notifier *notifier);
struct rt_regulator *rt_regulator_get(struct rt_device *dev, const char *id);
void rt_regulator_put(struct rt_regulator *reg);
rt_err_t rt_regulator_enable(struct rt_regulator *reg);
rt_err_t rt_regulator_disable(struct rt_regulator *reg);
rt_bool_t rt_regulator_is_enabled(struct rt_regulator *reg);
rt_bool_t rt_regulator_is_supported_voltage(struct rt_regulator *reg, int min_uvolt, int max_uvolt);
rt_err_t rt_regulator_set_voltage(struct rt_regulator *reg, int min_uvolt, int max_uvolt);
int rt_regulator_get_voltage(struct rt_regulator *reg);
rt_err_t rt_regulator_set_mode(struct rt_regulator *reg, rt_uint32_t mode);
rt_int32_t rt_regulator_get_mode(struct rt_regulator *reg);
rt_inline rt_err_t rt_regulator_set_voltage_triplet(struct rt_regulator *reg,
int min_uvolt, int target_uvolt, int max_uvolt)
{
if (!rt_regulator_set_voltage(reg, target_uvolt, max_uvolt))
{
return RT_EOK;
}
return rt_regulator_set_voltage(reg, min_uvolt, max_uvolt);
}
#endif /* __REGULATOR_H__ */
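
Consumer-side sketch for the regulator API above; the supply name "vcc3v3" and the 3.3 V target are illustrative only:

static rt_err_t demo_power_on(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_regulator *reg = rt_regulator_get(dev, "vcc3v3");

    if (!reg)
    {
        return -RT_EIO;
    }

    /* program 3.3 V (values in uV), then enable the supply */
    err = rt_regulator_set_voltage(reg, 3300000, 3300000);
    if (!err)
    {
        err = rt_regulator_enable(reg);
    }

    if (err)
    {
        rt_regulator_put(reg);
    }

    return err;
}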

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-26 GuEe-GUI first version
*/
#ifndef __RESET_H__
#define __RESET_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/ofw.h>
#define RT_RESET_CONTROLLER_OBJ_NAME "RSTC"
struct rt_reset_control_ops;
struct rt_reset_controller
{
struct rt_object parent;
rt_list_t rstc_nodes;
const char *name;
const struct rt_reset_control_ops *ops;
struct rt_ofw_node *ofw_node;
void *priv;
struct rt_spinlock spinlock;
};
struct rt_reset_control
{
rt_list_t list;
struct rt_reset_controller *rstcer;
int id;
const char *con_id;
rt_bool_t is_array;
void *priv;
};
struct rt_reset_control_ops
{
/*
* rt_ofw_cell_args return:
* args[0] = rstc.id
*/
rt_err_t (*ofw_parse)(struct rt_reset_control *rstc, struct rt_ofw_cell_args *args);
/* API */
rt_err_t (*reset)(struct rt_reset_control *rstc);
rt_err_t (*assert)(struct rt_reset_control *rstc);
rt_err_t (*deassert)(struct rt_reset_control *rstc);
int (*status)(struct rt_reset_control *rstc);
};
rt_err_t rt_reset_controller_register(struct rt_reset_controller *rstcer);
rt_err_t rt_reset_controller_unregister(struct rt_reset_controller *rstcer);
rt_err_t rt_reset_control_reset(struct rt_reset_control *rstc);
rt_err_t rt_reset_control_assert(struct rt_reset_control *rstc);
rt_err_t rt_reset_control_deassert(struct rt_reset_control *rstc);
int rt_reset_control_status(struct rt_reset_control *rstc);
rt_ssize_t rt_reset_control_get_count(struct rt_device *dev);
struct rt_reset_control *rt_reset_control_get_array(struct rt_device *dev);
struct rt_reset_control *rt_reset_control_get_by_index(struct rt_device *dev, int index);
struct rt_reset_control *rt_reset_control_get_by_name(struct rt_device *dev, const char *name);
void rt_reset_control_put(struct rt_reset_control *rstc);
struct rt_reset_control *rt_ofw_get_reset_control_array(struct rt_ofw_node *np);
struct rt_reset_control *rt_ofw_get_reset_control_by_index(struct rt_ofw_node *np, int index);
struct rt_reset_control *rt_ofw_get_reset_control_by_name(struct rt_ofw_node *np, const char *name);
#endif /* __RESET_H__ */
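
Sketch of pulsing a peripheral reset through this API; the control name "phy" and the 10 us hold time are assumptions:

static rt_err_t demo_pulse_reset(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_reset_control *rstc = rt_reset_control_get_by_name(dev, "phy");

    if (!rstc)
    {
        return -RT_EIO;
    }

    err = rt_reset_control_assert(rstc);
    if (!err)
    {
        rt_hw_us_delay(10);     /* hold the line in reset briefly */
        err = rt_reset_control_deassert(rstc);
    }

    rt_reset_control_put(rstc);

    return err;
}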

View File

@@ -0,0 +1,461 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#ifndef __SCSI_H__
#define __SCSI_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/byteorder.h>
#define RT_SCSI_LUN_SHIFT 5
rt_packed(struct rt_scsi_unknow
{
rt_uint8_t opcode;
});
rt_packed(struct rt_scsi_test_unit_ready
{
rt_uint8_t opcode;
rt_uint8_t reserved[4];
rt_uint8_t control;
rt_uint8_t pad[6]; /* To be ATAPI compatible */
});
rt_packed(struct rt_scsi_inquiry
{
rt_uint8_t opcode;
    rt_uint8_t config;  /* 7-2 Reserved, 1 Obsolete (formerly CMDDT), 0 EVPD */
rt_uint8_t page; /* Page code if EVPD=1 */
rt_uint8_t reserved;
rt_uint8_t alloc_length;
rt_uint8_t control;
rt_uint8_t pad[6]; /* To be ATAPI compatible */
});
rt_packed(struct rt_scsi_inquiry_data
{
#define RT_SCSI_DEVTYPE_MASK 31
rt_uint8_t devtype;
#define RT_SCSI_REMOVABLE_BIT 7
rt_uint8_t rmb;
rt_uint8_t reserved[2];
rt_uint8_t length;
rt_uint8_t reserved1[3];
char vendor[8];
char prodid[16];
char prodrev[4];
});
rt_packed(struct rt_scsi_request_sense
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-2 Reserved, 1 Obsolete, 0 SP */
rt_uint8_t reserved[2];
rt_uint8_t alloc_length;
rt_uint8_t control;
rt_uint8_t pad[6]; /* To be ATAPI compatible */
});
rt_packed(struct rt_scsi_request_sense_data
{
rt_uint8_t error_code; /* 7 Valid, 6-0 Err. code */
rt_uint8_t segment_number;
rt_uint8_t sense_key; /* 7 FileMark, 6 EndOfMedia, 5 ILI, 4-0 sense key */
rt_be32_t information;
rt_uint8_t additional_sense_length;
rt_be32_t cmd_specific_info;
rt_uint8_t additional_sense_code;
rt_uint8_t additional_sense_code_qualifier;
rt_uint8_t field_replaceable_unit_code;
rt_uint8_t sense_key_specific[3];
});
rt_packed(struct rt_scsi_read_capacity10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-1 Reserved, 0 Obsolete */
rt_be32_t logical_block_addr; /* only if PMI=1 */
rt_uint8_t reserved[2];
rt_uint8_t pmi;
rt_uint8_t control;
rt_be16_t pad; /* To be ATAPI compatible */
});
rt_packed(struct rt_scsi_read_capacity10_data
{
rt_be32_t last_block;
rt_be32_t block_size;
});
rt_packed(struct rt_scsi_read_capacity16
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 Reserved, 4-0 SERVICE ACTION 0x10 */
rt_be64_t logical_block_addr; /* only if PMI=1 */
rt_be32_t alloc_len;
rt_uint8_t pmi;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_read_capacity16_data
{
rt_be64_t last_block;
rt_be32_t block_size;
rt_uint8_t pad[20];
});
rt_packed(struct rt_scsi_read10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 RDPROTECT, 4 DPO, 3 FUA, 2 RARC, 1 Obsolete, 0 Obsolete */
rt_be32_t lba;
rt_uint8_t reserved;
rt_be16_t size;
rt_uint8_t reserved2;
rt_be16_t pad;
});
rt_packed(struct rt_scsi_read12
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 RDPROTECT, 4 DPO, 3 FUA, 2 RARC, 1 Obsolete, 0 Obsolete */
rt_be32_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_read16
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 RDPROTECT, 4 DPO, 3 FUA, 2 RARC, 1 Obsolete, 0 DLD2 */
rt_be64_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_write10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 WRPROTECT, 4 DPO, 3 FUA, 2 Reserved, 1 Obsolete, 0 Obsolete */
rt_be32_t lba;
rt_uint8_t reserved;
rt_be16_t size;
rt_uint8_t reserved2;
rt_be16_t pad;
});
rt_packed(struct rt_scsi_write12
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 WRPROTECT, 4 DPO, 3 FUA, 2 Reserved, 1 Obsolete, 0 Obsolete */
rt_be32_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_write16
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 WRPROTECT, 4 DPO, 3 FUA, 2 Reserved, 1 Obsolete, 0 DLD2 */
rt_be64_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_synchronize_cache10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-3 Reserved, 2 Obsolete, 1 IMMED, 0 Obsolete */
rt_be32_t lba;
rt_uint8_t reserved;
rt_be16_t size;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_synchronize_cache16
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-3 Reserved, 2 Obsolete, 1 IMMED, 0 Obsolete */
rt_be64_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
#define RT_SCSI_UNMAP_SHIFT 3
rt_packed(struct rt_scsi_write_same10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 WRPROTECT, 4 ANCHOR, 3 UNMAP, 2 Obsolete, 1 Obsolete, 0 Obsolete */
rt_be32_t lba;
rt_uint8_t reserved;
rt_be16_t size;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_write_same16
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 WRPROTECT, 4 ANCHOR, 3 UNMAP, 2 Obsolete, 1 Obsolete, 0 NDOB */
rt_be64_t lba;
rt_be32_t size;
rt_uint8_t reserved;
rt_uint8_t control;
});
#define RT_SCSI_PF_SHIFT 4
#define RT_SCSI_RTD_SHIFT 1
#define RT_SCSI_SP_SHIFT 0
rt_packed(struct rt_scsi_mode_select6
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 Reserved, 4 PF, 3-2 Reserved, 1 RTD, 0 SP */
rt_uint8_t reserved[2];
rt_uint8_t param_list_len;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_mode_select10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 Reserved, 4 PF, 3-1 Reserved, 0 SP */
rt_uint8_t reserved[5];
rt_be16_t param_list_len;
rt_uint8_t control;
});
struct rt_scsi_mode_select_data
{
rt_uint32_t length;
rt_uint16_t block_descriptor_length;
rt_uint8_t medium_type;
rt_uint8_t device_specific;
rt_uint8_t header_length;
rt_uint8_t longlba:1;
};
#define RT_SCSI_DBD_SHIFT 3
#define RT_SCSI_LLBAA_SHIFT 4
#define RT_SCSI_PC_SHIFT 6
#define RT_SCSI_PAGE_CODE_SHIFT 0
rt_packed(struct rt_scsi_mode_sense6
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-4 Reserved, 3 DBD, 2-0 Reserved */
rt_uint8_t page_control_code;
rt_uint8_t subpage_code;
rt_uint8_t allocation_len;
rt_uint8_t control;
});
rt_packed(struct rt_scsi_mode_sense10
{
rt_uint8_t opcode;
rt_uint8_t config; /* 7-5 Reserved, 4 LLBAA, 3 DBD, 2-0 Reserved */
rt_uint8_t page_control_code;
rt_uint8_t subpage_code;
rt_uint8_t reserved[3];
rt_be16_t allocation_len;
rt_uint8_t control;
});
#define RT_SCSI_CMD_TEST_UNIT_READY 0x00
#define RT_SCSI_CMD_REQUEST_SENSE 0x03
#define RT_SCSI_CMD_INQUIRY 0x12
#define RT_SCSI_CMD_MODE_SELECT 0x15
#define RT_SCSI_CMD_MODE_SENSE 0x1a
#define RT_SCSI_CMD_READ_CAPACITY10 0x25
#define RT_SCSI_CMD_READ10 0x28
#define RT_SCSI_CMD_WRITE10 0x2a
#define RT_SCSI_CMD_SYNCHRONIZE_CACHE10 0x35
#define RT_SCSI_CMD_WRITE_SAME10 0x41
#define RT_SCSI_CMD_MODE_SELECT10 0x55
#define RT_SCSI_CMD_MODE_SENSE10 0x5a
#define RT_SCSI_CMD_READ16 0x88
#define RT_SCSI_CMD_WRITE16 0x8a
#define RT_SCSI_CMD_SYNCHRONIZE_CACHE16 0x91
#define RT_SCSI_CMD_WRITE_SAME16 0x93
#define RT_SCSI_CMD_READ_CAPACITY16 0x9e
#define RT_SCSI_CMD_READ12 0xa8
#define RT_SCSI_CMD_WRITE12 0xaa
struct rt_scsi_cmd
{
union
{
struct rt_scsi_unknow unknow;
struct rt_scsi_test_unit_ready test_unit_ready;
struct rt_scsi_inquiry inquiry;
struct rt_scsi_request_sense request_sense;
struct rt_scsi_read_capacity10 read_capacity10;
struct rt_scsi_read_capacity16 read_capacity16;
struct rt_scsi_read10 read10;
struct rt_scsi_read12 read12;
struct rt_scsi_read16 read16;
struct rt_scsi_write10 write10;
struct rt_scsi_write12 write12;
struct rt_scsi_write16 write16;
struct rt_scsi_synchronize_cache10 synchronize_cache10;
struct rt_scsi_synchronize_cache16 synchronize_cache16;
struct rt_scsi_write_same10 write_same10;
struct rt_scsi_write_same16 write_same16;
struct rt_scsi_mode_select6 mode_select6;
struct rt_scsi_mode_select10 mode_select10;
struct rt_scsi_mode_sense6 mode_sense6;
struct rt_scsi_mode_sense10 mode_sense10;
} op;
rt_size_t op_size;
union
{
struct
{
struct rt_scsi_inquiry_data inquiry;
struct rt_scsi_request_sense_data request_sense;
struct rt_scsi_read_capacity10_data read_capacity10;
struct rt_scsi_read_capacity16_data read_capacity16;
};
struct
{
void *ptr;
rt_size_t size;
};
} data;
};
enum
{
SCSI_DEVICE_TYPE_DIRECT = 0x00, /* DiskPeripheral (GenDisk) */
SCSI_DEVICE_TYPE_SEQUENTIAL = 0x01, /* TapePeripheral */
SCSI_DEVICE_TYPE_PRINTER = 0x02, /* PrinterPeripheral (GenPrinter) */
SCSI_DEVICE_TYPE_PROCESSOR = 0x03, /* OtherPeripheral */
SCSI_DEVICE_TYPE_WRITE_ONCE_READ_MULTIPLE = 0x04, /* WormPeripheral (GenWorm) */
SCSI_DEVICE_TYPE_CDROM = 0x05, /* CdRomPeripheral (GenCdRom) */
SCSI_DEVICE_TYPE_SCANNER = 0x06, /* ScannerPeripheral (GenScanner) */
SCSI_DEVICE_TYPE_OPTICAL = 0x07, /* OpticalDiskPeripheral (GenOptical) */
SCSI_DEVICE_TYPE_MEDIUM_CHANGER = 0x08, /* MediumChangerPeripheral (ScsiChanger) */
SCSI_DEVICE_TYPE_COMMUNICATION = 0x09, /* CommunicationsPeripheral (ScsiNet) */
SCSI_DEVICE_TYPE_ASC_PREPRESS_GRAPHICS10 = 0x0a, /* ASCPrePressGraphicsPeripheral (ScsiASCIT8) */
SCSI_DEVICE_TYPE_ASC_PREPRESS_GRAPHICS11 = 0x0b, /* ASCPrePressGraphicsPeripheral (ScsiASCIT8) */
SCSI_DEVICE_TYPE_ARRAY = 0x0c, /* ArrayPeripheral (ScsiArray) */
SCSI_DEVICE_TYPE_ENCLOSURE = 0x0d, /* EnclosurePeripheral (ScsiEnclosure) */
SCSI_DEVICE_TYPE_RBC = 0x0e, /* RBCPeripheral (ScsiRBC) */
SCSI_DEVICE_TYPE_CARDREADER = 0x0f, /* CardReaderPeripheral (ScsiCardReader) */
SCSI_DEVICE_TYPE_BRIDGE = 0x10, /* BridgePeripheral (ScsiBridge) */
SCSI_DEVICE_TYPE_OTHER = 0x11, /* OtherPeripheral (ScsiOther) */
SCSI_DEVICE_TYPE_MAX,
};
struct rt_scsi_ops;
struct rt_scsi_host
{
struct rt_device *dev;
const struct rt_scsi_ops *ops;
rt_size_t max_id;
rt_size_t max_lun;
rt_list_t lun_nodes;
};
struct rt_scsi_device
{
struct rt_scsi_host *host;
rt_list_t list;
rt_size_t id;
rt_size_t lun;
rt_uint32_t devtype;
rt_uint32_t removable;
rt_size_t last_block;
rt_size_t block_size;
void *priv;
};
struct rt_scsi_ops
{
rt_err_t (*reset)(struct rt_scsi_device *sdev);
rt_err_t (*transfer)(struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd);
};
rt_err_t rt_scsi_host_register(struct rt_scsi_host *scsi);
rt_err_t rt_scsi_host_unregister(struct rt_scsi_host *scsi);
rt_inline rt_bool_t rt_scsi_cmd_is_write(struct rt_scsi_cmd *cmd)
{
    return cmd->op.write10.opcode == RT_SCSI_CMD_WRITE10 ||
           cmd->op.write12.opcode == RT_SCSI_CMD_WRITE12 ||
           cmd->op.write16.opcode == RT_SCSI_CMD_WRITE16;
}
rt_err_t rt_scsi_request_sense(struct rt_scsi_device *sdev,
struct rt_scsi_request_sense_data *out_data);
rt_err_t rt_scsi_test_unit_ready(struct rt_scsi_device *sdev);
rt_err_t rt_scsi_inquiry(struct rt_scsi_device *sdev,
struct rt_scsi_inquiry_data *out_data);
rt_err_t rt_scsi_read_capacity10(struct rt_scsi_device *sdev,
struct rt_scsi_read_capacity10_data *out_data);
rt_err_t rt_scsi_read_capacity16(struct rt_scsi_device *sdev,
struct rt_scsi_read_capacity16_data *out_data);
rt_err_t rt_scsi_read10(struct rt_scsi_device *sdev,
rt_off_t lba, void *buffer, rt_size_t size);
rt_err_t rt_scsi_read12(struct rt_scsi_device *sdev,
rt_off_t lba, void *buffer, rt_size_t size);
rt_err_t rt_scsi_read16(struct rt_scsi_device *sdev,
rt_off_t lba, void *buffer, rt_size_t size);
rt_err_t rt_scsi_write10(struct rt_scsi_device *sdev,
rt_off_t lba, const void *buffer, rt_size_t size);
rt_err_t rt_scsi_write12(struct rt_scsi_device *sdev,
rt_off_t lba, const void *buffer, rt_size_t size);
rt_err_t rt_scsi_write16(struct rt_scsi_device *sdev,
rt_off_t lba, const void *buffer, rt_size_t size);
rt_err_t rt_scsi_synchronize_cache10(struct rt_scsi_device *sdev,
rt_off_t lba, rt_size_t size);
rt_err_t rt_scsi_synchronize_cache16(struct rt_scsi_device *sdev,
rt_off_t lba, rt_size_t size);
rt_err_t rt_scsi_write_same10(struct rt_scsi_device *sdev,
rt_off_t lba, rt_size_t size);
rt_err_t rt_scsi_write_same16(struct rt_scsi_device *sdev,
rt_off_t lba, rt_size_t size);
rt_err_t rt_scsi_mode_select6(struct rt_scsi_device *sdev,
rt_uint8_t pf, rt_uint8_t sp, void *buffer, rt_size_t size,
struct rt_scsi_mode_select_data *data);
rt_err_t rt_scsi_mode_select10(struct rt_scsi_device *sdev,
rt_uint8_t pf, rt_uint8_t sp, void *buffer, rt_size_t size,
struct rt_scsi_mode_select_data *data);
rt_err_t rt_scsi_mode_sense6(struct rt_scsi_device *sdev,
rt_uint8_t dbd, rt_uint8_t modepage, rt_uint8_t subpage, void *buffer, rt_size_t size,
struct rt_scsi_mode_select_data *data);
rt_err_t rt_scsi_mode_sense10(struct rt_scsi_device *sdev,
rt_uint8_t dbd, rt_uint8_t modepage, rt_uint8_t subpage, void *buffer, rt_size_t size,
struct rt_scsi_mode_select_data *data);
#endif /* __SCSI_H__ */
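
Minimal sketch of a transport registering a SCSI host with this layer; the transfer handler is a stub, and in practice it must service the INQUIRY / READ CAPACITY commands issued during registration:

static rt_err_t demo_scsi_transfer(struct rt_scsi_device *sdev,
                                   struct rt_scsi_cmd *cmd)
{
    /* issue cmd->op (cmd->op_size bytes) to the hardware and fill
     * cmd->data on completion */
    return RT_EOK;
}

static const struct rt_scsi_ops demo_scsi_ops =
{
    .transfer = demo_scsi_transfer,
};

static struct rt_scsi_host demo_scsi_host =
{
    .ops     = &demo_scsi_ops,
    .max_id  = 1,
    .max_lun = 1,
};

/* demo_scsi_host.dev must point at the controller's rt_device before:
 * rt_scsi_host_register(&demo_scsi_host);
 */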

View File

@@ -12,7 +12,7 @@
#define __SENSOR_H__
#include <rtthread.h>
#include "pin.h"
#include "dev_pin.h"
#ifdef __cplusplus
extern "C" {
@@ -53,6 +53,9 @@ extern "C" {
#define RT_SENSOR_CLASS_IAQ (19) /* IAQ sensor. */
#define RT_SENSOR_CLASS_ETOH (20) /* EtOH sensor. */
#define RT_SENSOR_CLASS_BP (21) /* Blood Pressure */
#define RT_SENSOR_CLASS_VOLTAGE (22) /* Voltage sensor */
#define RT_SENSOR_CLASS_CURRENT (23) /* Current sensor */
#define RT_SENSOR_CLASS_POWER (24) /* Power sensor */
/* Sensor vendor types */
@@ -95,6 +98,9 @@ extern "C" {
#define RT_SENSOR_UNIT_DD (17) /* Coordinates unit: DD */
#define RT_SENSOR_UNIT_MGM3 (18) /* Concentration unit: mg/m3 */
#define RT_SENSOR_UNIT_MMHG (19) /* Blood Pressure unit: mmHg */
#define RT_SENSOR_UNIT_MV (20) /* Voltage unit: mV */
#define RT_SENSOR_UNIT_MA (21) /* Current unit: mA */
#define RT_SENSOR_UNIT_MW (22) /* Power unit: mW */
/* Sensor communication interface types */
#define RT_SENSOR_INTF_I2C (1 << 0)
@@ -230,9 +236,12 @@ struct rt_sensor_data
rt_uint32_t dust; /* Dust sensor. unit: ug/m3 */
rt_uint32_t eco2; /* eCO2 sensor. unit: ppm */
rt_uint32_t spo2; /* SpO2 sensor. unit: permillage */
rt_uint32_t iaq; /* IAQ sensor. unit: 1 */
rt_uint32_t etoh; /* EtOH sensor. unit: ppm */
rt_uint32_t iaq; /* IAQ sensor. unit: 1 */
rt_uint32_t etoh; /* EtOH sensor. unit: ppm */
struct sensor_bp bp; /* BloodPressure. unit: mmHg */
float mv; /* Voltage sensor. unit: mv */
float ma; /* Current sensor. unit: ma */
float mw; /* Power sensor. unit: mw */
} data;
};
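
Sketch of how a sensor driver could report the new voltage channel; the surrounding fetch routine and its prototype are not shown here:

static void demo_fill_voltage(struct rt_sensor_data *data, float millivolt)
{
    /* voltage channel is reported in mV (RT_SENSOR_UNIT_MV) */
    data->data.mv = millivolt;
}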

View File

@@ -13,7 +13,7 @@
#define __SENSOR_H__
#include <rtthread.h>
#include "pin.h"
#include "dev_pin.h"
#ifdef __cplusplus
extern "C" {

View File

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#ifndef __RTT_BYPASS_H__
#define __RTT_BYPASS_H__
#include <rtthread.h>
#include <rttypes.h>
#include <rtdevice.h>
typedef rt_err_t(*bypass_function_t)(struct rt_serial_device* serial, char buf, void* data);
#define RT_BYPASS_LEVEL_MAX 4
#define RT_BYPASS_LEVEL_1 0
#define RT_BYPASS_LEVEL_2 1
#define RT_BYPASS_LEVEL_3 2
#define RT_BYPASS_LEVEL_4 3
#define RT_BYPASS_MAX_LEVEL 4
/* Protect levels can be registered but cannot be unregistered; use them with care. */
#define RT_BYPASS_PROTECT_LEVEL_1 10
#define RT_BYPASS_PROTECT_LEVEL_2 11
#define RT_BYPASS_PROTECT_LEVEL_3 12
#define RT_BYPASS_PROTECT_LEVEL_4 13
struct rt_serial_bypass_func {
    /* Function pointer that processes the bypassed data */
bypass_function_t bypass;
    /* The smaller the level value, the higher the execution priority */
rt_uint8_t level;
rt_list_t node;
char name[RT_NAME_MAX];
void* data;
};
struct rt_serial_bypass_head
{
rt_list_t head;
struct rt_spinlock spinlock;
};
struct rt_serial_bypass {
struct rt_work work;
struct rt_spinlock spinlock;
struct rt_workqueue* lower_workq;
struct rt_serial_bypass_head* upper_h;
struct rt_serial_bypass_head* lower_h;
rt_mutex_t mutex;
struct rt_ringbuffer* pipe;
};
int serial_bypass_list(int argc, char** argv);
void rt_bypass_work_straight(struct rt_serial_device* serial);
void rt_bypass_putchar(struct rt_serial_device* serial, rt_uint8_t ch);
rt_size_t rt_bypass_getchar(struct rt_serial_device* serial, rt_uint8_t* ch);
rt_err_t rt_bypass_upper_unregister(struct rt_serial_device* serial, rt_uint8_t level);
rt_err_t rt_bypass_lower_unregister(struct rt_serial_device* serial, rt_uint8_t level);
rt_err_t rt_bypass_upper_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data);
rt_err_t rt_bypass_lower_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data);
#endif
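
Sketch of an upper-level bypass hook; the return-value convention assumed here (RT_EOK consumes the character, an error lets lower levels see it) is for illustration only, as is the "uart0" lookup:

static rt_err_t demo_bypass_hook(struct rt_serial_device *serial, char ch, void *data)
{
    if (ch == 0x03)             /* Ctrl+C */
    {
        rt_kprintf("got Ctrl+C\n");
        return RT_EOK;          /* consumed, stop here (assumed semantics) */
    }

    return -RT_ERROR;           /* pass on to lower levels (assumed semantics) */
}

/* registration, e.g. in application init:
 * struct rt_serial_device *serial =
 *     (struct rt_serial_device *)rt_device_find("uart0");
 * rt_bypass_upper_register(serial, "demo", RT_BYPASS_LEVEL_1,
 *                          demo_bypass_hook, RT_NULL);
 */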

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#ifndef __SYSCON_H__
#define __SYSCON_H__
#include <drivers/ofw.h>
struct rt_syscon
{
rt_list_t list;
struct rt_ofw_node *np;
void *iomem_base;
rt_size_t iomem_size;
struct rt_spinlock rw_lock;
};
rt_err_t rt_syscon_read(struct rt_syscon *syscon, rt_off_t offset, rt_uint32_t *out_val);
rt_err_t rt_syscon_write(struct rt_syscon *syscon, rt_off_t offset, rt_uint32_t val);
rt_err_t rt_syscon_update_bits(struct rt_syscon *syscon, rt_off_t offset, rt_uint32_t mask, rt_uint32_t val);
struct rt_syscon *rt_syscon_find_by_ofw_node(struct rt_ofw_node *np);
struct rt_syscon *rt_syscon_find_by_ofw_compatible(const char *compatible);
struct rt_syscon *rt_syscon_find_by_ofw_phandle(struct rt_ofw_node *np, const char *propname);
#endif /* __SYSCON_H__ */
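
Sketch of locating a system controller and flipping a bit; the compatible string and register offset are illustrative only:

static rt_err_t demo_syscon_set_bit(void)
{
    struct rt_syscon *syscon = rt_syscon_find_by_ofw_compatible("vendor,sysctrl");

    if (!syscon)
    {
        return -RT_EIO;
    }

    /* set bit 0 of the register at offset 0x10 */
    return rt_syscon_update_bits(syscon, 0x10, RT_BIT(0), RT_BIT(0));
}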

View File

@@ -0,0 +1,205 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#ifndef __THERMAL_H__
#define __THERMAL_H__
#include <rtdef.h>
#include <dt-bindings/thermal/thermal.h>
/* No upper/lower limit requirement */
#define RT_THERMAL_NO_LIMIT ((rt_uint32_t)THERMAL_NO_LIMIT)
#define RT_THERMAL_TEMP_INVALID (-274000)
struct rt_thermal_zone_ops;
struct rt_thermal_cooling_device;
struct rt_thermal_cooling_device_ops;
struct rt_thermal_cooling_governor;
enum rt_thermal_trip_type
{
RT_THERMAL_TRIP_ACTIVE = 0,
RT_THERMAL_TRIP_PASSIVE,
RT_THERMAL_TRIP_HOT,
RT_THERMAL_TRIP_CRITICAL,
RT_THERMAL_TRIP_TYPE_MAX,
};
struct rt_thermal_trip
{
/* Temperature value in millidegree celsius */
int temperature;
/* Relative hysteresis in millidegree celsius */
int hysteresis;
enum rt_thermal_trip_type type;
void *priv;
};
struct rt_thermal_zone_params
{
/* Sustainable power (heat) that this thermal zone can dissipate in mW */
int sustainable_power;
/* Slope of a linear temperature adjustment curve */
int slope;
/* Offset of a linear temperature adjustment curve */
int offset;
};
struct rt_thermal_cooling_cell
{
struct rt_thermal_cooling_device *cooling_devices;
rt_uint32_t level_range[2];
};
struct rt_thermal_cooling_map
{
rt_uint32_t contribution;
rt_size_t cells_nr;
struct rt_thermal_cooling_cell *cells;
struct rt_thermal_trip *trips;
};
struct rt_thermal_zone_device
{
struct rt_device parent;
int zone_id;
const struct rt_thermal_zone_ops *ops;
rt_bool_t trips_free;
rt_size_t trips_nr;
struct rt_thermal_trip *trips;
struct rt_thermal_zone_params params;
rt_bool_t enabled;
rt_bool_t cooling;
int temperature;
int last_temperature;
int prev_low_trip;
int prev_high_trip;
rt_list_t notifier_nodes;
struct rt_spinlock nodes_lock;
rt_size_t cooling_maps_nr;
struct rt_thermal_cooling_map *cooling_maps;
rt_tick_t passive_delay, polling_delay;
struct rt_work poller;
struct rt_mutex mutex;
void *priv;
};
struct rt_thermal_zone_ops
{
rt_err_t (*get_temp)(struct rt_thermal_zone_device *zdev, int *out_temp);
rt_err_t (*set_trips)(struct rt_thermal_zone_device *zdev, int low_temp, int high_temp);
rt_err_t (*set_trip_temp)(struct rt_thermal_zone_device *zdev, int trip_id, int temp);
rt_err_t (*set_trip_hyst)(struct rt_thermal_zone_device *zdev, int trip_id, int hyst);
void (*hot)(struct rt_thermal_zone_device *zdev);
void (*critical)(struct rt_thermal_zone_device *zdev);
};
/*
 * We don't want to build a temperature control system finer than an air
 * conditioner's; the goal is simply to ensure reliable heat dissipation
 * under high-load tasks or when the SoC temperature is too high.
*/
struct rt_thermal_cooling_device
{
struct rt_device parent;
const struct rt_thermal_cooling_device_ops *ops;
/* The cooling capacity indicator */
rt_ubase_t max_level;
rt_list_t governor_node;
struct rt_thermal_cooling_governor *gov;
void *priv;
};
struct rt_thermal_cooling_device_ops
{
rt_err_t (*bind)(struct rt_thermal_cooling_device *cdev, struct rt_thermal_zone_device *zdev);
rt_err_t (*unbind)(struct rt_thermal_cooling_device *cdev, struct rt_thermal_zone_device *zdev);
rt_err_t (*get_max_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t *out_level);
rt_err_t (*get_cur_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t *out_level);
rt_err_t (*set_cur_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t level);
};
struct rt_thermal_cooling_governor
{
rt_list_t list;
const char *name;
rt_list_t cdev_nodes;
void (*tuning)(struct rt_thermal_zone_device *zdev,
int map_idx, int cell_idx, rt_ubase_t *level);
};
struct rt_thermal_notifier;
#define RT_THERMAL_MSG_EVENT_UNSPECIFIED RT_BIT(0) /* Unspecified event */
#define RT_THERMAL_MSG_EVENT_TEMP_SAMPLE RT_BIT(1) /* New Temperature sample */
#define RT_THERMAL_MSG_TRIP_VIOLATED RT_BIT(2) /* TRIP Point violation */
#define RT_THERMAL_MSG_TRIP_CHANGED RT_BIT(3) /* TRIP Point temperature changed */
#define RT_THERMAL_MSG_DEVICE_DOWN RT_BIT(4) /* Thermal device is down */
#define RT_THERMAL_MSG_DEVICE_UP RT_BIT(5) /* Thermal device is up after a down event */
#define RT_THERMAL_MSG_DEVICE_POWER_CAPABILITY_CHANGED RT_BIT(6) /* Power capability changed */
#define RT_THERMAL_MSG_TABLE_CHANGED RT_BIT(7) /* Thermal table(s) changed */
#define RT_THERMAL_MSG_EVENT_KEEP_ALIVE RT_BIT(8) /* Request for user space handler to respond */
typedef rt_err_t (*rt_thermal_notifier_callback)(struct rt_thermal_notifier *notifier,
rt_ubase_t msg);
struct rt_thermal_notifier
{
rt_list_t list;
struct rt_thermal_zone_device *zdev;
rt_thermal_notifier_callback callback;
void *priv;
};
rt_err_t rt_thermal_zone_device_register(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_zone_device_unregister(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_cooling_device_register(struct rt_thermal_cooling_device *cdev);
rt_err_t rt_thermal_cooling_device_unregister(struct rt_thermal_cooling_device *cdev);
rt_err_t rt_thermal_cooling_governor_register(struct rt_thermal_cooling_governor *gov);
rt_err_t rt_thermal_cooling_governor_unregister(struct rt_thermal_cooling_governor *gov);
rt_err_t rt_thermal_cooling_device_change_governor(struct rt_thermal_cooling_device *cdev,
const char *name);
rt_err_t rt_thermal_zone_notifier_register(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier);
rt_err_t rt_thermal_zone_notifier_unregister(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier);
void rt_thermal_zone_device_update(struct rt_thermal_zone_device *zdev, rt_ubase_t msg);
void rt_thermal_cooling_device_kick(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_zone_set_trip(struct rt_thermal_zone_device *zdev, int trip_id,
const struct rt_thermal_trip *trip);
rt_err_t rt_thermal_zone_get_trip(struct rt_thermal_zone_device *zdev, int trip_id,
struct rt_thermal_trip *out_trip);
#endif /* __THERMAL_H__ */
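
Sketch of subscribing to temperature samples of an existing zone; how the zone pointer is obtained is outside this header, and the names are illustrative:

static rt_err_t demo_thermal_cb(struct rt_thermal_notifier *notifier, rt_ubase_t msg)
{
    if (msg & RT_THERMAL_MSG_EVENT_TEMP_SAMPLE)
    {
        /* temperature is reported in millidegree Celsius */
        rt_kprintf("zone %d temperature: %d mC\n",
                   notifier->zdev->zone_id, notifier->zdev->temperature);
    }

    return RT_EOK;
}

static struct rt_thermal_notifier demo_notifier =
{
    .callback = demo_thermal_cb,
};

/* rt_thermal_zone_notifier_register(zdev, &demo_notifier); */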

View File

@@ -12,10 +12,10 @@
#define __WLAN_H__
#include <rtthread.h>
#include <wlan_dev.h>
#include <wlan_cfg.h>
#include <wlan_mgnt.h>
#include <wlan_prot.h>
#include <wlan_workqueue.h>
#include <dev_wlan.h>
#include <dev_wlan_cfg.h>
#include <dev_wlan_mgnt.h>
#include <dev_wlan_prot.h>
#include <dev_wlan_workqueue.h>
#endif

View File

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __DT_BINDINGS_PHYE_H__
#define __DT_BINDINGS_PHYE_H__
#define PHY_NONE 0
#define PHY_TYPE_SATA 1
#define PHY_TYPE_PCIE 2
#define PHY_TYPE_USB2 3
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
#define PHY_TYPE_DP 6
#define PHY_TYPE_XPCS 7
#define PHY_TYPE_SGMII 8
#define PHY_TYPE_QSGMII 9
#define PHY_TYPE_DPHY 10
#define PHY_TYPE_CPHY 11
#define PHY_TYPE_USXGMII 12
#endif /* __DT_BINDINGS_PHYE_H__ */

Some files were not shown because too many files have changed in this diff.