rtt update

2025-01-18 13:25:25 +08:00
parent c6a7554b51
commit d6009a0773
726 changed files with 103376 additions and 6270 deletions


@@ -1,5 +1,6 @@
config RT_USING_SDIO
bool "Using SD/MMC device drivers"
select RT_USING_BLK
default n
if RT_USING_SDIO
@@ -24,5 +25,8 @@ config RT_USING_SDIO
default 16
config RT_SDIO_DEBUG
bool "Enable SDIO debug log output"
default n
endif
default n
config RT_USING_SDHCI
bool "Using sdhci for sd/mmc drivers"
default n
endif
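For orientation only: with these options enabled through menuconfig, the generated rtconfig.h is expected to contain matching defines roughly like the following (exact content depends on the board configuration):

#define RT_USING_SDIO
#define RT_USING_BLK
#define RT_USING_SDHCI
#define RT_SDIO_DEBUG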


@@ -3,16 +3,20 @@ from building import *
cwd = GetCurrentDir()
src = Split("""
block_dev.c
mmcsd_core.c
sd.c
sdio.c
gpt.c
mmc.c
dev_block.c
dev_mmcsd_core.c
dev_sd.c
dev_sdio.c
dev_mmc.c
""")
# The set of source files associated with this SConscript file.
path = [cwd + '/../include']
path = [cwd + '/../include' , cwd + '/sdhci/include']
if GetDepend('RT_USING_SDHCI'):
src += [os.path.join('sdhci', 'sdhci.c')]
src += [os.path.join('sdhci', 'fit-mmc.c')]
src += [os.path.join('sdhci', 'sdhci-platform.c')]
group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_SDIO'], CPPPATH = path)


@@ -0,0 +1,422 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2023-08-08 GuEe-GUI port to the block
*/
#include <rtthread.h>
#include <drivers/blk.h>
#include <drivers/misc.h>
#include <drivers/dev_mmcsd_core.h>
#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
struct mmcsd_blk_device
{
struct rt_blk_disk parent;
struct rt_mmcsd_card *card;
rt_size_t max_req_size;
struct rt_device_blk_geometry geometry;
};
#define raw_to_mmcsd_blk(raw) rt_container_of(raw, struct mmcsd_blk_device, parent)
#ifdef RT_USING_DM
static struct rt_dm_ida sdio_ida = RT_DM_IDA_INIT(SDIO);
#endif
static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
{
int err;
struct rt_mmcsd_cmd cmd;
cmd.busy_timeout = 0;
cmd.cmd_code = SEND_STATUS;
cmd.arg = card->rca << 16;
cmd.flags = RESP_R1 | CMD_AC;
err = mmcsd_send_cmd(card->host, &cmd, retries);
if (err)
return err;
if (status)
*status = cmd.resp[0];
return 0;
}
static int card_busy_detect(struct rt_mmcsd_card *card, unsigned int timeout_ms,
rt_uint32_t *resp_errs)
{
int timeout = rt_tick_from_millisecond(timeout_ms);
int err = 0;
rt_uint32_t status;
rt_tick_t start;
start = rt_tick_get();
do
{
rt_bool_t out = (int)(rt_tick_get() - start) > timeout;
err = __send_status(card, &status, 5);
if (err)
{
LOG_E("error %d requesting status", err);
return err;
}
/* Accumulate any response error bits seen */
if (resp_errs)
*resp_errs |= status;
if (out)
{
LOG_E("wait card busy timeout");
return -RT_ETIMEOUT;
}
/*
* Some cards mishandle the status bits,
* so make sure to check both the busy
* indication and the card state.
*/
}
while (!(status & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(status) == 7));
return err;
}
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
rt_int32_t err;
rt_uint32_t blocks;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
rt_uint32_t timeout_us;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
err = mmcsd_send_cmd(card->host, &cmd, 0);
if (err)
return -RT_ERROR;
if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
return -RT_ERROR;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
data.timeout_ns = card->tacc_ns * 100;
data.timeout_clks = card->tacc_clks * 100;
timeout_us = data.timeout_ns / 1000;
timeout_us += data.timeout_clks * 1000 /
(card->host->io_cfg.clock / 1000);
if (timeout_us > 100000)
{
data.timeout_ns = 100000000;
data.timeout_clks = 0;
}
data.blksize = 4;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = &blocks;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
req.cmd = &cmd;
req.data = &data;
mmcsd_send_request(card->host, &req);
if (cmd.err || data.err)
return -RT_ERROR;
return blocks;
}
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
rt_uint32_t sector,
void *buf,
rt_size_t blks,
rt_uint8_t dir)
{
struct rt_mmcsd_cmd cmd, stop;
struct rt_mmcsd_data data;
struct rt_mmcsd_req req;
struct rt_mmcsd_host *host = card->host;
rt_uint32_t r_cmd, w_cmd;
mmcsd_host_lock(host);
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.arg = sector;
if (!(card->flags & CARD_FLAG_SDHC))
{
cmd.arg <<= 9;
}
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = SECTOR_SIZE;
data.blks = blks;
if (blks > 1)
{
if (!controller_is_spi(card->host) || !dir)
{
req.stop = &stop;
stop.cmd_code = STOP_TRANSMISSION;
stop.arg = 0;
stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
}
r_cmd = READ_MULTIPLE_BLOCK;
w_cmd = WRITE_MULTIPLE_BLOCK;
}
else
{
req.stop = RT_NULL;
r_cmd = READ_SINGLE_BLOCK;
w_cmd = WRITE_BLOCK;
}
if (!controller_is_spi(card->host) && (card->flags & 0x8000))
{
/* last request was a WRITE, need to check busy */
card_busy_detect(card, 10000, RT_NULL);
}
if (!dir)
{
cmd.cmd_code = r_cmd;
data.flags |= DATA_DIR_READ;
card->flags &= 0x7fff;
}
else
{
cmd.cmd_code = w_cmd;
data.flags |= DATA_DIR_WRITE;
card->flags |= 0x8000;
}
mmcsd_set_data_timeout(&data, card);
data.buf = buf;
mmcsd_send_request(host, &req);
mmcsd_host_unlock(host);
if (cmd.err || data.err || stop.err)
{
LOG_E("mmcsd request blocks error");
LOG_E("%d,%d,%d, 0x%08x,0x%08x",
cmd.err, data.err, stop.err, data.flags, sector);
return -RT_ERROR;
}
return RT_EOK;
}
static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
{
struct rt_mmcsd_cmd cmd;
int err;
/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
if (card->flags & CARD_FLAG_SDHC)
return 0;
mmcsd_host_lock(card->host);
cmd.cmd_code = SET_BLOCKLEN;
cmd.arg = 512;
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
err = mmcsd_send_cmd(card->host, &cmd, 5);
mmcsd_host_unlock(card->host);
if (err)
{
LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
return -RT_ERROR;
}
return 0;
}
static rt_ssize_t mmcsd_blk_read(struct rt_blk_disk *disk, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
rt_err_t err;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = sector_count;
void *rd_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
while (remain_size)
{
req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, rd_ptr, req_size, 0);
if (err)
{
return err;
}
offset += req_size;
rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
remain_size -= req_size;
}
return sector_count - remain_size;
}
static rt_ssize_t mmcsd_blk_write(struct rt_blk_disk *disk, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
rt_err_t err;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = sector_count;
void *wr_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
while (remain_size)
{
req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, wr_ptr, req_size, 1);
if (err)
{
return err;
}
offset += req_size;
wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
remain_size -= req_size;
}
return sector_count - remain_size;
}
static rt_err_t mmcsd_blk_getgeome(struct rt_blk_disk *disk,
struct rt_device_blk_geometry *geometry)
{
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
rt_memcpy(geometry, &blk_dev->geometry, sizeof(*geometry));
return RT_EOK;
}
static const struct rt_blk_disk_ops mmcsd_blk_ops =
{
.read = mmcsd_blk_read,
.write = mmcsd_blk_write,
.getgeome = mmcsd_blk_getgeome,
};
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
{
rt_err_t err;
struct rt_mmcsd_host *host = card->host;
struct mmcsd_blk_device *blk_dev = rt_calloc(1, sizeof(*blk_dev));
if (!blk_dev)
{
return -RT_ENOMEM;
}
card->blk_dev = blk_dev;
#ifdef RT_USING_DM
blk_dev->parent.ida = &sdio_ida;
#endif
blk_dev->parent.parallel_io = RT_FALSE;
blk_dev->parent.removable = controller_is_removable(host);
blk_dev->parent.ops = &mmcsd_blk_ops;
blk_dev->parent.max_partitions = RT_MMCSD_MAX_PARTITION;
blk_dev->card = card;
blk_dev->max_req_size = rt_min_t(rt_size_t,
host->max_dma_segs * host->max_seg_size,
host->max_blk_count * host->max_blk_size) >> 9;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count = card->card_capacity * (1024 / 512);
/* Set the block size before probing partitions */
if ((err = mmcsd_set_blksize(card)))
{
goto _fail;
}
rt_thread_mdelay(1);
#ifdef RT_USING_DM
rt_dm_dev_set_name(&blk_dev->parent.parent, host->name);
#else
rt_strncpy(blk_dev->parent.parent.parent.name, host->name, RT_NAME_MAX);
#endif
if ((err = rt_hw_blk_disk_register(&blk_dev->parent)))
{
goto _fail;
}
return RT_EOK;
_fail:
card->blk_dev = RT_NULL;
rt_free(blk_dev);
return err;
}
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
struct mmcsd_blk_device *blk_dev = card->blk_dev;
if (!blk_dev)
{
return;
}
if (!rt_hw_blk_disk_unregister(&blk_dev->parent))
{
card->blk_dev = RT_NULL;
rt_free(blk_dev);
}
}
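Illustrative sketch (not part of the driver): once rt_mmcsd_blk_probe() has registered the disk, it can be accessed through the ordinary RT-Thread device interface. The device name "sd" below assumes the default host name set in mmcsd_host_init(); partition naming may differ on a given board.

#include <rtthread.h>

static void blk_read_demo(void)
{
    rt_uint8_t buf[1024];
    rt_device_t dev = rt_device_find("sd");

    if (dev && rt_device_open(dev, RT_DEVICE_OFLAG_RDONLY) == RT_EOK)
    {
        /* for block devices, pos is the sector index and size the sector count */
        rt_device_read(dev, 0, buf, 2);
        rt_device_close(dev);
    }
}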


@@ -0,0 +1,792 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-06-15 hichard first version
* 2024-05-25 HPMicro add HS400 support
*/
#include <drivers/dev_mmcsd_core.h>
#include <drivers/dev_mmc.h>
#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
static const rt_uint32_t tran_unit[] =
{
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
};
static const rt_uint8_t tran_value[] =
{
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
static const rt_uint32_t tacc_uint[] =
{
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};
static const rt_uint8_t tacc_value[] =
{
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
rt_inline rt_uint32_t GET_BITS(rt_uint32_t *resp,
rt_uint32_t start,
rt_uint32_t size)
{
const rt_int32_t __size = size;
const rt_uint32_t __mask = (__size < 32 ? 1 << __size : 0) - 1;
const rt_int32_t __off = 3 - ((start) / 32);
const rt_int32_t __shft = (start) & 31;
rt_uint32_t __res;
__res = resp[__off] >> __shft;
if (__size + __shft > 32)
__res |= resp[__off - 1] << ((32 - __shft) % 32);
return __res & __mask;
}
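/*
 * The 128-bit response is stored as four 32-bit words with resp[0]
 * holding bits 127..96 and resp[3] holding bits 31..0, hence the
 * word offset 3 - start/32 above. For example, GET_BITS(resp, 126, 2)
 * evaluates to resp[0] >> 30 masked to two bits.
 */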
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
static rt_int32_t mmcsd_parse_csd(struct rt_mmcsd_card *card)
{
rt_uint32_t a, b;
struct rt_mmcsd_csd *csd = &card->csd;
rt_uint32_t *resp = card->resp_csd;
/*
* We only understand CSD structure v1.1 and v1.2.
* v1.2 has extra information in bits 15, 11 and 10.
* We also support eMMC v4.4 & v4.41.
*/
csd->csd_structure = GET_BITS(resp, 126, 2);
if (csd->csd_structure == 0)
{
LOG_E("unrecognised CSD structure version %d!", csd->csd_structure);
return -RT_ERROR;
}
csd->taac = GET_BITS(resp, 112, 8);
csd->nsac = GET_BITS(resp, 104, 8);
csd->tran_speed = GET_BITS(resp, 96, 8);
csd->card_cmd_class = GET_BITS(resp, 84, 12);
csd->rd_blk_len = GET_BITS(resp, 80, 4);
csd->rd_blk_part = GET_BITS(resp, 79, 1);
csd->wr_blk_misalign = GET_BITS(resp, 78, 1);
csd->rd_blk_misalign = GET_BITS(resp, 77, 1);
csd->dsr_imp = GET_BITS(resp, 76, 1);
csd->c_size = GET_BITS(resp, 62, 12);
csd->c_size_mult = GET_BITS(resp, 47, 3);
csd->r2w_factor = GET_BITS(resp, 26, 3);
csd->wr_blk_len = GET_BITS(resp, 22, 4);
csd->wr_blk_partial = GET_BITS(resp, 21, 1);
csd->csd_crc = GET_BITS(resp, 1, 7);
card->card_blksize = 1 << csd->rd_blk_len;
card->tacc_clks = csd->nsac * 100;
card->tacc_ns = (tacc_uint[csd->taac & 0x07] * tacc_value[(csd->taac & 0x78) >> 3] + 9) / 10;
card->max_data_rate = tran_unit[csd->tran_speed & 0x07] * tran_value[(csd->tran_speed & 0x78) >> 3];
if (csd->wr_blk_len >= 9)
{
a = GET_BITS(resp, 42, 5);
b = GET_BITS(resp, 37, 5);
card->erase_size = (a + 1) * (b + 1);
card->erase_size <<= csd->wr_blk_len - 9;
}
return 0;
}
/*
* Read extended CSD.
*/
static int mmc_get_ext_csd(struct rt_mmcsd_card *card, rt_uint8_t **new_ext_csd)
{
void *ext_csd;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
*new_ext_csd = RT_NULL;
if (GET_BITS(card->resp_csd, 122, 4) < 4)
return 0;
/*
* As the ext_csd is so large and mostly unused, we don't store the
* raw block in mmc_card.
*/
ext_csd = rt_malloc(512);
if (!ext_csd)
{
LOG_E("alloc memory failed when get ext csd!");
return -RT_ENOMEM;
}
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.cmd_code = SEND_EXT_CSD;
cmd.arg = 0;
/* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = 512;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = ext_csd;
/*
* Some cards require longer data read timeout than indicated in CSD.
* Address this by setting the read timeout to a "reasonably high"
* value. For the cards tested, 300ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
*/
data.timeout_ns = 300000000;
data.timeout_clks = 0;
mmcsd_send_request(card->host, &req);
if (cmd.err || data.err)
{
/* do not leak the buffer on a failed transfer */
rt_free(ext_csd);
return cmd.err ? cmd.err : data.err;
}
*new_ext_csd = ext_csd;
return 0;
}
/*
* Decode extended CSD.
*/
static int mmc_parse_ext_csd(struct rt_mmcsd_card *card, rt_uint8_t *ext_csd)
{
rt_uint64_t card_capacity = 0;
struct rt_mmcsd_host *host;
if (card == RT_NULL || ext_csd == RT_NULL)
{
LOG_E("emmc parse ext csd fail, invaild args");
return -1;
}
host = card->host;
uint8_t device_type = ext_csd[EXT_CSD_CARD_TYPE];
if ((host->flags & MMCSD_SUP_HS400) && (device_type & EXT_CSD_CARD_TYPE_HS400))
{
card->flags |= CARD_FLAG_HS400;
card->max_data_rate = 200000000;
}
else if ((host->flags & MMCSD_SUP_HS200) && (device_type & EXT_CSD_CARD_TYPE_HS200))
{
card->flags |= CARD_FLAG_HS200;
card->max_data_rate = 200000000;
}
else if ((host->flags & MMCSD_SUP_HIGHSPEED_DDR) && (device_type & EXT_CSD_CARD_TYPE_DDR_52))
{
card->flags |= CARD_FLAG_HIGHSPEED_DDR;
card->hs_max_data_rate = 52000000;
}
else
{
card->flags |= CARD_FLAG_HIGHSPEED;
card->hs_max_data_rate = 52000000;
}
if (ext_csd[EXT_CSD_STROBE_SUPPORT] != 0)
{
card->ext_csd.enhanced_data_strobe = 1;
}
card->ext_csd.cache_size =
ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
card_capacity = *((rt_uint32_t *)&ext_csd[EXT_CSD_SEC_CNT]);
card->card_sec_cnt = card_capacity;
card_capacity *= card->card_blksize;
card_capacity >>= 10; /* unit:KB */
card->card_capacity = card_capacity;
LOG_I("emmc card capacity %d KB, card sec count:%d.", card->card_capacity, card->card_sec_cnt);
return 0;
}
/**
* mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
* @index: EXT_CSD register index
* @value: value to program into EXT_CSD register
*
* Modifies the EXT_CSD register for selected card.
*/
static int mmc_switch(struct rt_mmcsd_card *card, rt_uint8_t set,
rt_uint8_t index, rt_uint8_t value)
{
int err;
struct rt_mmcsd_host *host = card->host;
struct rt_mmcsd_cmd cmd = {0};
cmd.cmd_code = SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) | (value << 8) | set;
cmd.flags = RESP_R1B | CMD_AC;
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
return 0;
}
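/*
 * Worked example (illustrative, assuming the usual JEDEC byte indexes):
 * switching EXT_CSD byte 183 (EXT_CSD_BUS_WIDTH) to value 1 (4-bit bus)
 * with the normal command set packs the CMD6 argument as
 * (0x03 << 24) | (183 << 16) | (1 << 8) | 0x01 == 0x03B70101.
 */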
static int mmc_compare_ext_csds(struct rt_mmcsd_card *card,
rt_uint8_t *ext_csd, rt_uint32_t bus_width)
{
rt_uint8_t *bw_ext_csd;
int err;
if (bus_width == MMCSD_BUS_WIDTH_1)
return 0;
err = mmc_get_ext_csd(card, &bw_ext_csd);
if (err || bw_ext_csd == RT_NULL)
{
err = -RT_ERROR;
goto out;
}
/* only compare read only fields */
err = !((ext_csd[EXT_CSD_PARTITION_SUPPORT] == bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
(ext_csd[EXT_CSD_ERASED_MEM_CONT] == bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
(ext_csd[EXT_CSD_REV] == bw_ext_csd[EXT_CSD_REV]) &&
(ext_csd[EXT_CSD_STRUCTURE] == bw_ext_csd[EXT_CSD_STRUCTURE]) &&
(ext_csd[EXT_CSD_CARD_TYPE] == bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
(ext_csd[EXT_CSD_S_A_TIMEOUT] == bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
(ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
(ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] == bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
(ext_csd[EXT_CSD_SEC_TRIM_MULT] == bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
(ext_csd[EXT_CSD_SEC_ERASE_MULT] == bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
(ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] == bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
(ext_csd[EXT_CSD_TRIM_MULT] == bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
(ext_csd[EXT_CSD_SEC_CNT + 0] == bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
(ext_csd[EXT_CSD_SEC_CNT + 1] == bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
(ext_csd[EXT_CSD_SEC_CNT + 2] == bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
(ext_csd[EXT_CSD_SEC_CNT + 3] == bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
(ext_csd[EXT_CSD_PWR_CL_52_195] == bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
(ext_csd[EXT_CSD_PWR_CL_26_195] == bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
(ext_csd[EXT_CSD_PWR_CL_52_360] == bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
(ext_csd[EXT_CSD_PWR_CL_26_360] == bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
(ext_csd[EXT_CSD_PWR_CL_200_195] == bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
(ext_csd[EXT_CSD_PWR_CL_200_360] == bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
(ext_csd[EXT_CSD_PWR_CL_DDR_52_195] == bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
(ext_csd[EXT_CSD_PWR_CL_DDR_52_360] == bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
(ext_csd[EXT_CSD_PWR_CL_DDR_200_360] == bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
if (err)
err = -RT_ERROR;
out:
rt_free(bw_ext_csd);
return err;
}
/*
* Select the bus width among 4-bit and 8-bit(SDR).
* If the bus width is changed successfully, return the selected width value.
* Zero is returned instead of error value if the wide width is not supported.
*/
static int mmc_select_bus_width(struct rt_mmcsd_card *card, rt_uint8_t *ext_csd)
{
rt_uint32_t ext_csd_bits[][2] =
{
{EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8},
{EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4},
{EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1},
};
rt_uint32_t bus_widths[] =
{
MMCSD_BUS_WIDTH_8,
MMCSD_BUS_WIDTH_4,
MMCSD_BUS_WIDTH_1
};
struct rt_mmcsd_host *host = card->host;
unsigned idx, bus_width = 0;
int err = 0, ddr = 0;
if (GET_BITS(card->resp_csd, 122, 4) < 4)
return 0;
if (card->flags & CARD_FLAG_HIGHSPEED_DDR)
{
ddr = 2;
}
/*
* Unlike SD, MMC cards don't have a configuration register to notify
* supported bus width. So bus test command should be run to identify
* the supported bus width or compare the EXT_CSD values of current
* bus width and EXT_CSD values of 1 bit mode read earlier.
*/
for (idx = 0; idx < sizeof(bus_widths) / sizeof(rt_uint32_t); idx++)
{
/*
* Determine BUS WIDTH mode according to the capability of host
*/
if (((ext_csd_bits[idx][0] == EXT_CSD_BUS_WIDTH_8) && ((host->flags & MMCSD_BUSWIDTH_8) == 0)) ||
((ext_csd_bits[idx][0] == EXT_CSD_BUS_WIDTH_4) && ((host->flags & MMCSD_BUSWIDTH_4) == 0)))
{
continue;
}
bus_width = bus_widths[idx];
if (bus_width == MMCSD_BUS_WIDTH_1)
{
ddr = 0;
}
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][0]);
if (err)
continue;
mmcsd_set_bus_width(host, bus_width);
err = mmc_compare_ext_csds(card, ext_csd, bus_width);
if (!err)
{
break;
}
else
{
switch (ext_csd_bits[idx][0])
{
case 0:
LOG_E("switch to bus width 1 bit failed!");
break;
case 1:
LOG_E("switch to bus width 4 bit failed!");
break;
case 2:
LOG_E("switch to bus width 8 bit failed!");
break;
default:
break;
}
}
}
if (!err && ddr)
{
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][1]);
}
if (!err)
{
if (card->flags & (CARD_FLAG_HIGHSPEED | CARD_FLAG_HIGHSPEED_DDR))
{
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING,
1);
}
}
return err;
}
rt_err_t mmc_send_op_cond(struct rt_mmcsd_host *host,
rt_uint32_t ocr, rt_uint32_t *rocr)
{
struct rt_mmcsd_cmd cmd;
rt_uint32_t i;
rt_err_t err = RT_EOK;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SEND_OP_COND;
cmd.arg = controller_is_spi(host) ? 0 : ocr;
cmd.flags = RESP_SPI_R1 | RESP_R3 | CMD_BCR;
for (i = 100; i; i--)
{
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (controller_is_spi(host))
{
if (!(cmd.resp[0] & R1_SPI_IDLE))
break;
}
else
{
if (cmd.resp[0] & CARD_BUSY)
break;
}
err = -RT_ETIMEOUT;
rt_thread_mdelay(10); //delay 10ms
}
if (rocr && !controller_is_spi(host))
*rocr = cmd.resp[0];
return err;
}
static rt_err_t mmc_set_card_addr(struct rt_mmcsd_host *host, rt_uint32_t rca)
{
rt_err_t err;
struct rt_mmcsd_cmd cmd;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SET_RELATIVE_ADDR;
cmd.arg = rca << 16;
cmd.flags = RESP_R1 | CMD_AC;
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
return 0;
}
static int mmc_select_hs200(struct rt_mmcsd_card *card)
{
int ret;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200);
if (ret)
return ret;
mmcsd_set_timing(card->host, MMCSD_TIMING_MMC_HS200);
mmcsd_set_clock(card->host, card->max_data_rate);
ret = mmcsd_excute_tuning(card);
return ret;
}
static int mmc_switch_to_hs400(struct rt_mmcsd_card *card)
{
struct rt_mmcsd_host *host = card->host;
int err;
rt_uint8_t ext_csd_bus_width;
rt_uint32_t hs_timing;
/* Set HS_TIMING to 0x01 (High Speed) */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
if (err != RT_EOK)
{
return err;
}
mmcsd_set_timing(card->host, MMCSD_TIMING_MMC_HS);
/* Host changes frequency to <= 52MHz */
mmcsd_set_clock(card->host, 52000000);
rt_bool_t support_enhanced_ds = ((card->ext_csd.enhanced_data_strobe != 0) &&
((host->flags & MMCSD_SUP_ENH_DS) != 0));
/* Set the bus width to:
* 0x86 if enhanced data strobe is supported, or
* 0x06 if enhanced data strobe is not supported
*/
ext_csd_bus_width = support_enhanced_ds ?
EXT_CSD_DDR_BUS_WIDTH_8_EH_DS :
EXT_CSD_DDR_BUS_WIDTH_8;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bus_width);
if (err != RT_EOK)
{
return err;
}
/* Set HS_TIMING to 0x03 (HS400) */
err = mmc_switch(card,
EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING,
EXT_CSD_TIMING_HS400);
if (err != RT_EOK)
{
return err;
}
/* Change the Host timing accordingly */
hs_timing = support_enhanced_ds ?
MMCSD_TIMING_MMC_HS400_ENH_DS :
MMCSD_TIMING_MMC_HS400;
mmcsd_set_timing(host, hs_timing);
/* Host may change frequency to <= 200MHz */
mmcsd_set_clock(card->host, card->max_data_rate);
return RT_EOK;
}
static int mmc_select_hs400(struct rt_mmcsd_card *card)
{
int ret;
struct rt_mmcsd_host *host = card->host;
/* if the card or host doesn't support enhanced data strobe, switch to HS200 and perform tuning process first */
if ((card->ext_csd.enhanced_data_strobe == 0) || ((host->flags & MMCSD_SUP_ENH_DS) == 0))
{
ret = mmc_select_hs200(card);
if (ret != RT_EOK)
{
return ret;
}
}
return mmc_switch_to_hs400(card);
}
static int mmc_select_timing(struct rt_mmcsd_card *card)
{
int ret = 0;
if (card->flags & CARD_FLAG_HS400)
{
LOG_I("emmc: switch to HS400 mode\n");
ret = mmc_select_hs400(card);
}
else if (card->flags & CARD_FLAG_HS200)
{
LOG_I("emmc: switch to HS200 mode\n");
ret = mmc_select_hs200(card);
}
else if (card->flags & CARD_FLAG_HIGHSPEED_DDR)
{
LOG_I("emmc: switch to HIGH Speed DDR mode\n");
mmcsd_set_timing(card->host, MMCSD_TIMING_MMC_DDR52);
mmcsd_set_clock(card->host, card->hs_max_data_rate);
}
else
{
LOG_I("emmc: switch to HIGH Speed mode\n");
mmcsd_set_timing(card->host, MMCSD_TIMING_MMC_HS);
mmcsd_set_clock(card->host, card->hs_max_data_rate);
}
return ret;
}
static rt_int32_t mmcsd_mmc_init_card(struct rt_mmcsd_host *host,
rt_uint32_t ocr)
{
rt_int32_t err;
rt_uint32_t resp[4];
rt_uint32_t rocr = 0;
rt_uint8_t *ext_csd = RT_NULL;
struct rt_mmcsd_card *card = RT_NULL;
mmcsd_go_idle(host);
/* The extra bit indicates that we support high capacity */
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
if (err)
goto err;
if (controller_is_spi(host))
{
err = mmcsd_spi_use_crc(host, 1);
if (err)
goto err1;
}
if (controller_is_spi(host))
err = mmcsd_get_cid(host, resp);
else
err = mmcsd_all_get_cid(host, resp);
if (err)
goto err;
card = rt_malloc(sizeof(struct rt_mmcsd_card));
if (!card)
{
LOG_E("malloc card failed!");
err = -RT_ENOMEM;
goto err;
}
rt_memset(card, 0, sizeof(struct rt_mmcsd_card));
card->card_type = CARD_TYPE_MMC;
card->host = host;
card->rca = 1;
rt_memcpy(card->resp_cid, resp, sizeof(card->resp_cid));
/*
* For native busses: get card RCA and quit open drain mode.
*/
if (!controller_is_spi(host))
{
err = mmc_set_card_addr(host, card->rca);
if (err)
goto err1;
mmcsd_set_bus_mode(host, MMCSD_BUSMODE_PUSHPULL);
}
err = mmcsd_get_csd(card, card->resp_csd);
if (err)
goto err1;
err = mmcsd_parse_csd(card);
if (err)
goto err1;
if (!controller_is_spi(host))
{
err = mmcsd_select_card(card);
if (err)
goto err1;
}
/*
* Fetch and process extended CSD.
*/
err = mmc_get_ext_csd(card, &ext_csd);
if (err)
goto err1;
err = mmc_parse_ext_csd(card, ext_csd);
if (err)
goto err1;
/* If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
* ocr register has bit 30 set for sector addressing.
*/
if (!(card->flags & CARD_FLAG_SDHC) && (rocr & (1 << 30)))
card->flags |= CARD_FLAG_SDHC;
/*switch bus width and bus mode*/
err = mmc_select_bus_width(card, ext_csd);
if (err)
{
LOG_E("mmc select buswidth fail");
goto err0;
}
err = mmc_select_timing(card);
if (err)
{
LOG_E("mmc select timing fail");
goto err0;
}
if (card->ext_csd.cache_size > 0)
{
mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_CACHE_CTRL, 1);
}
host->card = card;
rt_free(ext_csd);
return 0;
err0:
rt_free(ext_csd);
err1:
rt_free(card);
err:
return err;
}
/*
* Starting point for mmc card init.
*/
rt_int32_t init_mmc(struct rt_mmcsd_host *host, rt_uint32_t ocr)
{
rt_int32_t err;
rt_uint32_t current_ocr;
/*
* We need to get OCR a different way for SPI.
*/
if (controller_is_spi(host))
{
err = mmcsd_spi_read_ocr(host, 0, &ocr);
if (err)
goto err;
}
current_ocr = mmcsd_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
if (!current_ocr)
{
err = -RT_ERROR;
goto err;
}
/*
* Detect and init the card.
*/
err = mmcsd_mmc_init_card(host, current_ocr);
if (err)
goto err;
mmcsd_host_unlock(host);
err = rt_mmcsd_blk_probe(host->card);
if (err)
goto remove_card;
mmcsd_host_lock(host);
return 0;
remove_card:
mmcsd_host_lock(host);
rt_mmcsd_blk_remove(host->card);
rt_free(host->card);
host->card = RT_NULL;
err:
LOG_E("init MMC card failed!");
return err;
}


@@ -0,0 +1,781 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
*/
#include <rtthread.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/dev_sd.h>
#include <drivers/dev_mmc.h>
#include <drivers/dev_sdio.h>
#include <string.h>
#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
#ifndef RT_MMCSD_STACK_SIZE
#define RT_MMCSD_STACK_SIZE 1024
#endif
#ifndef RT_MMCSD_THREAD_PREORITY
#if (RT_THREAD_PRIORITY_MAX == 32)
#define RT_MMCSD_THREAD_PREORITY 0x16
#else
#define RT_MMCSD_THREAD_PREORITY 0x40
#endif
#endif
//static struct rt_semaphore mmcsd_sem;
static struct rt_thread mmcsd_detect_thread;
static rt_uint8_t mmcsd_stack[RT_MMCSD_STACK_SIZE];
static struct rt_mailbox mmcsd_detect_mb;
static rt_uint32_t mmcsd_detect_mb_pool[4];
static struct rt_mailbox mmcsd_hotpluge_mb;
static rt_uint32_t mmcsd_hotpluge_mb_pool[4];
void mmcsd_host_lock(struct rt_mmcsd_host *host)
{
rt_mutex_take(&host->bus_lock, RT_WAITING_FOREVER);
}
void mmcsd_host_unlock(struct rt_mmcsd_host *host)
{
rt_mutex_release(&host->bus_lock);
}
void mmcsd_req_complete(struct rt_mmcsd_host *host)
{
rt_sem_release(&host->sem_ack);
}
void mmcsd_send_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
do
{
req->cmd->retries--;
req->cmd->err = 0;
req->cmd->mrq = req;
if (req->data)
{
req->cmd->data = req->data;
req->data->err = 0;
req->data->mrq = req;
if (req->stop)
{
req->data->stop = req->stop;
req->stop->err = 0;
req->stop->mrq = req;
}
}
host->ops->request(host, req);
rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);
}
while (req->cmd->err && (req->cmd->retries > 0));
}
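/*
 * Illustrative only: the ops->request() call above is asynchronous.
 * A host controller driver is expected to start the transfer there and,
 * once the hardware reports completion, call mmcsd_req_complete() so
 * that the rt_sem_take() above returns. The handler name below is
 * hypothetical.
 */
#if 0
static void demo_host_transfer_done(struct rt_mmcsd_host *host)
{
    /* ... read controller status, fill req->cmd->resp / err fields ... */
    mmcsd_req_complete(host); /* releases host->sem_ack */
}
#endif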
rt_int32_t mmcsd_send_cmd(struct rt_mmcsd_host *host,
struct rt_mmcsd_cmd *cmd,
int retries)
{
struct rt_mmcsd_req req;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(cmd->resp, 0, sizeof(cmd->resp));
cmd->retries = retries;
req.cmd = cmd;
cmd->data = RT_NULL;
mmcsd_send_request(host, &req);
return cmd->err;
}
rt_int32_t mmcsd_go_idle(struct rt_mmcsd_host *host)
{
rt_int32_t err;
struct rt_mmcsd_cmd cmd;
if (!controller_is_spi(host))
{
mmcsd_set_chip_select(host, MMCSD_CS_HIGH);
rt_thread_mdelay(1);
}
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = GO_IDLE_STATE;
cmd.arg = 0;
cmd.flags = RESP_SPI_R1 | RESP_NONE | CMD_BC;
err = mmcsd_send_cmd(host, &cmd, 0);
rt_thread_mdelay(1);
if (!controller_is_spi(host))
{
mmcsd_set_chip_select(host, MMCSD_CS_IGNORE);
rt_thread_mdelay(1);
}
return err;
}
rt_int32_t mmcsd_spi_read_ocr(struct rt_mmcsd_host *host,
rt_int32_t high_capacity,
rt_uint32_t *ocr)
{
struct rt_mmcsd_cmd cmd;
rt_int32_t err;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SPI_READ_OCR;
cmd.arg = high_capacity ? (1 << 30) : 0;
cmd.flags = RESP_SPI_R3;
err = mmcsd_send_cmd(host, &cmd, 0);
*ocr = cmd.resp[1];
return err;
}
rt_int32_t mmcsd_all_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
{
rt_int32_t err;
struct rt_mmcsd_cmd cmd;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = ALL_SEND_CID;
cmd.arg = 0;
cmd.flags = RESP_R2 | CMD_BCR;
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);
return 0;
}
rt_int32_t mmcsd_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
{
rt_int32_t err, i;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
rt_uint32_t *buf = RT_NULL;
if (!controller_is_spi(host))
{
if (!host->card)
return -RT_ERROR;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SEND_CID;
cmd.arg = host->card->rca << 16;
cmd.flags = RESP_R2 | CMD_AC;
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);
return 0;
}
buf = (rt_uint32_t *)rt_malloc(16);
if (!buf)
{
LOG_E("allocate memory failed!");
return -RT_ENOMEM;
}
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.cmd_code = SEND_CID;
cmd.arg = 0;
/* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = 16;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = buf;
/*
* The spec states that CSR and CID accesses have a timeout
* of 64 clock cycles.
*/
data.timeout_ns = 0;
data.timeout_clks = 64;
mmcsd_send_request(host, &req);
if (cmd.err || data.err)
{
rt_free(buf);
return -RT_ERROR;
}
for (i = 0; i < 4; i++)
cid[i] = buf[i];
rt_free(buf);
return 0;
}
rt_int32_t mmcsd_get_csd(struct rt_mmcsd_card *card, rt_uint32_t *csd)
{
rt_int32_t err, i;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
rt_uint32_t *buf = RT_NULL;
if (!controller_is_spi(card->host))
{
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SEND_CSD;
cmd.arg = card->rca << 16;
cmd.flags = RESP_R2 | CMD_AC;
err = mmcsd_send_cmd(card->host, &cmd, 3);
if (err)
return err;
rt_memcpy(csd, cmd.resp, sizeof(rt_uint32_t) * 4);
return 0;
}
buf = (rt_uint32_t *)rt_malloc(16);
if (!buf)
{
LOG_E("allocate memory failed!");
return -RT_ENOMEM;
}
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.cmd_code = SEND_CSD;
cmd.arg = 0;
/* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = 16;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = buf;
/*
* The spec states that CSR and CID accesses have a timeout
* of 64 clock cycles.
*/
data.timeout_ns = 0;
data.timeout_clks = 64;
mmcsd_send_request(card->host, &req);
if (cmd.err || data.err)
{
rt_free(buf);
return -RT_ERROR;
}
for (i = 0; i < 4; i++)
csd[i] = buf[i];
rt_free(buf);
return 0;
}
static rt_int32_t _mmcsd_select_card(struct rt_mmcsd_host *host,
struct rt_mmcsd_card *card)
{
rt_int32_t err;
struct rt_mmcsd_cmd cmd;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SELECT_CARD;
if (card)
{
cmd.arg = card->rca << 16;
cmd.flags = RESP_R1 | CMD_AC;
}
else
{
cmd.arg = 0;
cmd.flags = RESP_NONE | CMD_AC;
}
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
return 0;
}
rt_int32_t mmcsd_select_card(struct rt_mmcsd_card *card)
{
return _mmcsd_select_card(card->host, card);
}
rt_int32_t mmcsd_deselect_cards(struct rt_mmcsd_card *card)
{
return _mmcsd_select_card(card->host, RT_NULL);
}
rt_int32_t mmcsd_spi_use_crc(struct rt_mmcsd_host *host, rt_int32_t use_crc)
{
struct rt_mmcsd_cmd cmd;
rt_int32_t err;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SPI_CRC_ON_OFF;
cmd.flags = RESP_SPI_R1;
cmd.arg = use_crc;
err = mmcsd_send_cmd(host, &cmd, 0);
if (!err)
host->spi_use_crc = use_crc;
return err;
}
rt_inline void mmcsd_set_iocfg(struct rt_mmcsd_host *host)
{
struct rt_mmcsd_io_cfg *io_cfg = &host->io_cfg;
mmcsd_dbg("clock %uHz busmode %u powermode %u cs %u Vdd %u "
"width %u \n",
io_cfg->clock, io_cfg->bus_mode,
io_cfg->power_mode, io_cfg->chip_select, io_cfg->vdd,
io_cfg->bus_width);
host->ops->set_iocfg(host, io_cfg);
}
/*
* Control chip select pin on a host.
*/
void mmcsd_set_chip_select(struct rt_mmcsd_host *host, rt_int32_t mode)
{
host->io_cfg.chip_select = mode;
mmcsd_set_iocfg(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
void mmcsd_set_clock(struct rt_mmcsd_host *host, rt_uint32_t clk)
{
if (clk < host->freq_min)
{
LOG_W("clock too low!");
}
host->io_cfg.clock = clk;
mmcsd_set_iocfg(host);
}
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
void mmcsd_set_bus_mode(struct rt_mmcsd_host *host, rt_uint32_t mode)
{
host->io_cfg.bus_mode = mode;
mmcsd_set_iocfg(host);
}
/*
* Change data bus width of a host.
*/
void mmcsd_set_bus_width(struct rt_mmcsd_host *host, rt_uint32_t width)
{
host->io_cfg.bus_width = width;
mmcsd_set_iocfg(host);
}
void mmcsd_set_timing(struct rt_mmcsd_host *host, rt_uint32_t timing)
{
host->io_cfg.timing = timing;
mmcsd_set_iocfg(host);
}
void mmcsd_set_data_timeout(struct rt_mmcsd_data *data,
const struct rt_mmcsd_card *card)
{
rt_uint32_t mult;
if (card->card_type == CARD_TYPE_SDIO)
{
data->timeout_ns = 1000000000; /* SDIO card 1s */
data->timeout_clks = 0;
return;
}
/*
* SD cards use a 100 multiplier rather than 10
*/
mult = (card->card_type == CARD_TYPE_SD) ? 100 : 10;
/*
* Scale up the multiplier (and therefore the timeout) by
* the r2w factor for writes.
*/
if (data->flags & DATA_DIR_WRITE)
mult <<= card->csd.r2w_factor;
data->timeout_ns = card->tacc_ns * mult;
data->timeout_clks = card->tacc_clks * mult;
/*
* SD cards also have an upper limit on the timeout.
*/
if (card->card_type == CARD_TYPE_SD)
{
rt_uint32_t timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
timeout_us += data->timeout_clks * 1000 /
(card->host->io_cfg.clock / 1000);
if (data->flags & DATA_DIR_WRITE)
/*
* The limit is really 250 ms, but that is
* insufficient for some crappy cards.
*/
limit_us = 300000;
else
limit_us = 100000;
/*
* SDHC cards always use these fixed values.
*/
if (timeout_us > limit_us || card->flags & CARD_FLAG_SDHC)
{
data->timeout_ns = limit_us * 1000; /* SDHC card fixed 250ms */
data->timeout_clks = 0;
}
}
if (controller_is_spi(card->host))
{
if (data->flags & DATA_DIR_WRITE)
{
if (data->timeout_ns < 1000000000)
data->timeout_ns = 1000000000; /* 1s */
}
else
{
if (data->timeout_ns < 100000000)
data->timeout_ns = 100000000; /* 100ms */
}
}
}
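/*
 * Worked example (illustrative): a standard-capacity SD card reporting
 * tacc_ns = 1,500,000 and tacc_clks = 0 gets a read timeout of
 * 1.5 ms * 100 = 150 ms, which exceeds the 100 ms read limit above and
 * is therefore clamped. SDHC cards (CARD_FLAG_SDHC) always take the
 * fixed 100 ms read / 300 ms write limits.
 */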
/*
* Mask off any voltages we don't support and select
* the lowest voltage
*/
rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr)
{
int bit;
extern int __rt_ffs(int value);
ocr &= host->valid_ocr;
bit = __rt_ffs(ocr);
if (bit)
{
bit -= 1;
ocr &= 3 << bit;
host->io_cfg.vdd = bit;
mmcsd_set_iocfg(host);
}
else
{
LOG_W("host doesn't support card's voltages!");
ocr = 0;
}
return ocr;
}
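/*
 * Example: if host and card only share the 3.2-3.4 V window (OCR bits
 * 20 and 21, i.e. ocr == 0x00300000), __rt_ffs() returns 21, bit becomes
 * 20, the mask 3 << 20 keeps both bits, and io_cfg.vdd is set to 20.
 */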
static void mmcsd_power_up(struct rt_mmcsd_host *host)
{
int bit = __rt_fls(host->valid_ocr) - 1;
host->io_cfg.vdd = bit;
if (controller_is_spi(host))
{
host->io_cfg.chip_select = MMCSD_CS_HIGH;
host->io_cfg.bus_mode = MMCSD_BUSMODE_PUSHPULL;
}
else
{
host->io_cfg.chip_select = MMCSD_CS_IGNORE;
host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
}
host->io_cfg.power_mode = MMCSD_POWER_UP;
host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
mmcsd_set_iocfg(host);
/*
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
*/
rt_thread_mdelay(10);
host->io_cfg.clock = host->freq_min;
host->io_cfg.power_mode = MMCSD_POWER_ON;
mmcsd_set_iocfg(host);
/*
* This delay must be at least 74 clock cycles, or 1 ms, or the
* time required to reach a stable voltage.
*/
rt_thread_mdelay(10);
}
static void mmcsd_power_off(struct rt_mmcsd_host *host)
{
host->io_cfg.clock = 0;
host->io_cfg.vdd = 0;
if (!controller_is_spi(host))
{
host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
host->io_cfg.chip_select = MMCSD_CS_IGNORE;
}
host->io_cfg.power_mode = MMCSD_POWER_OFF;
host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
mmcsd_set_iocfg(host);
}
int mmcsd_wait_cd_changed(rt_int32_t timeout)
{
struct rt_mmcsd_host *host;
if (rt_mb_recv(&mmcsd_hotpluge_mb, (rt_ubase_t *)&host, timeout) == RT_EOK)
{
if (host->card == RT_NULL)
{
return MMCSD_HOST_UNPLUGED;
}
else
{
return MMCSD_HOST_PLUGED;
}
}
return -RT_ETIMEOUT;
}
RTM_EXPORT(mmcsd_wait_cd_changed);
void mmcsd_change(struct rt_mmcsd_host *host)
{
rt_mb_send(&mmcsd_detect_mb, (rt_ubase_t)host);
}
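/*
 * Illustrative only: a host driver's card-detect interrupt just posts
 * the event with mmcsd_change(); the detect thread below performs the
 * actual (re-)enumeration. Applications may block on
 * mmcsd_wait_cd_changed() to learn the outcome. Names are hypothetical.
 */
#if 0
static void demo_cd_pin_isr(void *args)
{
    mmcsd_change((struct rt_mmcsd_host *)args);
}

static void demo_wait_for_card(void)
{
    if (mmcsd_wait_cd_changed(RT_WAITING_FOREVER) == MMCSD_HOST_PLUGED)
    {
        /* card is initialized and its block device is registered */
    }
}
#endif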
void mmcsd_detect(void *param)
{
struct rt_mmcsd_host *host;
rt_uint32_t ocr;
rt_int32_t err;
while (1)
{
if (rt_mb_recv(&mmcsd_detect_mb, (rt_ubase_t *)&host, RT_WAITING_FOREVER) == RT_EOK)
{
if (host->card == RT_NULL)
{
mmcsd_host_lock(host);
mmcsd_power_up(host);
mmcsd_go_idle(host);
mmcsd_send_if_cond(host, host->valid_ocr);
err = sdio_io_send_op_cond(host, 0, &ocr);
if (!err)
{
if (init_sdio(host, ocr))
mmcsd_power_off(host);
mmcsd_host_unlock(host);
continue;
}
/*
* detect SD card
*/
err = mmcsd_send_app_op_cond(host, 0, &ocr);
if (!err)
{
if (init_sd(host, ocr))
mmcsd_power_off(host);
mmcsd_host_unlock(host);
rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
continue;
}
/*
* detect mmc card
*/
err = mmc_send_op_cond(host, 0, &ocr);
if (!err)
{
if (init_mmc(host, ocr))
mmcsd_power_off(host);
mmcsd_host_unlock(host);
rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
continue;
}
mmcsd_host_unlock(host);
}
else
{
/* card removed */
mmcsd_host_lock(host);
if (host->card->sdio_function_num != 0)
{
LOG_W("unsupport sdio card plug out!");
}
else
{
rt_mmcsd_blk_remove(host->card);
rt_free(host->card);
host->card = RT_NULL;
}
mmcsd_host_unlock(host);
rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
}
}
}
}
void mmcsd_host_init(struct rt_mmcsd_host *host)
{
rt_memset(host, 0, sizeof(struct rt_mmcsd_host));
strncpy(host->name, "sd", sizeof(host->name) - 1);
host->max_seg_size = 65535;
host->max_dma_segs = 1;
host->max_blk_size = 512;
host->max_blk_count = 4096;
rt_mutex_init(&host->bus_lock, "sd_bus_lock", RT_IPC_FLAG_FIFO);
rt_sem_init(&host->sem_ack, "sd_ack", 0, RT_IPC_FLAG_FIFO);
}
struct rt_mmcsd_host *mmcsd_alloc_host(void)
{
struct rt_mmcsd_host *host;
host = rt_malloc(sizeof(struct rt_mmcsd_host));
if (!host)
{
LOG_E("alloc host failed");
return RT_NULL;
}
mmcsd_host_init(host);
return host;
}
void mmcsd_free_host(struct rt_mmcsd_host *host)
{
rt_mutex_detach(&host->bus_lock);
rt_sem_detach(&host->sem_ack);
rt_free(host);
}
rt_int32_t mmcsd_excute_tuning(struct rt_mmcsd_card *card)
{
struct rt_mmcsd_host *host = card->host;
rt_int32_t opcode;
if (!host->ops->execute_tuning)
return RT_EOK;
if (card->card_type == CARD_TYPE_MMC)
opcode = SEND_TUNING_BLOCK_HS200;
else
opcode = SEND_TUNING_BLOCK;
return host->ops->execute_tuning(host, opcode);
}
int rt_mmcsd_core_init(void)
{
rt_err_t ret;
/* initialize mailboxes and create the SD card detect thread */
ret = rt_mb_init(&mmcsd_detect_mb, "mmcsdmb",
&mmcsd_detect_mb_pool[0], sizeof(mmcsd_detect_mb_pool) / sizeof(mmcsd_detect_mb_pool[0]),
RT_IPC_FLAG_FIFO);
RT_ASSERT(ret == RT_EOK);
ret = rt_mb_init(&mmcsd_hotpluge_mb, "mmcsdhotplugmb",
&mmcsd_hotpluge_mb_pool[0], sizeof(mmcsd_hotpluge_mb_pool) / sizeof(mmcsd_hotpluge_mb_pool[0]),
RT_IPC_FLAG_FIFO);
RT_ASSERT(ret == RT_EOK);
ret = rt_thread_init(&mmcsd_detect_thread, "mmcsd_detect", mmcsd_detect, RT_NULL,
&mmcsd_stack[0], RT_MMCSD_STACK_SIZE, RT_MMCSD_THREAD_PREORITY, 20);
if (ret == RT_EOK)
{
rt_thread_startup(&mmcsd_detect_thread);
}
rt_sdio_init();
return 0;
}
INIT_PREV_EXPORT(rt_mmcsd_core_init);


@@ -0,0 +1,869 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2024-05-26 HPMicro add UHS-I support
*/
#include <drivers/dev_mmcsd_core.h>
#include <drivers/dev_sd.h>
#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
static const rt_uint32_t tran_unit[] =
{
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
};
static const rt_uint8_t tran_value[] =
{
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
static const rt_uint32_t tacc_uint[] =
{
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};
static const rt_uint8_t tacc_value[] =
{
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
rt_inline rt_uint32_t GET_BITS(rt_uint32_t *resp,
rt_uint32_t start,
rt_uint32_t size)
{
const rt_int32_t __size = size;
const rt_uint32_t __mask = (__size < 32 ? 1 << __size : 0) - 1;
const rt_int32_t __off = 3 - ((start) / 32);
const rt_int32_t __shft = (start) & 31;
rt_uint32_t __res;
__res = resp[__off] >> __shft;
if (__size + __shft > 32)
__res |= resp[__off-1] << ((32 - __shft) % 32);
return __res & __mask;
}
static rt_int32_t mmcsd_parse_csd(struct rt_mmcsd_card *card)
{
struct rt_mmcsd_csd *csd = &card->csd;
rt_uint32_t *resp = card->resp_csd;
csd->csd_structure = GET_BITS(resp, 126, 2);
switch (csd->csd_structure)
{
case 0:
csd->taac = GET_BITS(resp, 112, 8);
csd->nsac = GET_BITS(resp, 104, 8);
csd->tran_speed = GET_BITS(resp, 96, 8);
csd->card_cmd_class = GET_BITS(resp, 84, 12);
csd->rd_blk_len = GET_BITS(resp, 80, 4);
csd->rd_blk_part = GET_BITS(resp, 79, 1);
csd->wr_blk_misalign = GET_BITS(resp, 78, 1);
csd->rd_blk_misalign = GET_BITS(resp, 77, 1);
csd->dsr_imp = GET_BITS(resp, 76, 1);
csd->c_size = GET_BITS(resp, 62, 12);
csd->c_size_mult = GET_BITS(resp, 47, 3);
csd->r2w_factor = GET_BITS(resp, 26, 3);
csd->wr_blk_len = GET_BITS(resp, 22, 4);
csd->wr_blk_partial = GET_BITS(resp, 21, 1);
csd->csd_crc = GET_BITS(resp, 1, 7);
card->card_blksize = 1 << csd->rd_blk_len;
card->card_capacity = (csd->c_size + 1) << (csd->c_size_mult + 2);
card->card_capacity *= card->card_blksize;
card->card_capacity >>= 10; /* unit:KB */
card->tacc_clks = csd->nsac * 100;
card->tacc_ns = (tacc_uint[csd->taac&0x07] * tacc_value[(csd->taac&0x78)>>3] + 9) / 10;
card->max_data_rate = tran_unit[csd->tran_speed&0x07] * tran_value[(csd->tran_speed&0x78)>>3];
break;
case 1:
card->flags |= CARD_FLAG_SDHC;
/* This field is fixed to 0Eh, which indicates 1 ms.
The host should not use TAAC, NSAC, and R2W_FACTOR
to calculate timeouts and should use fixed timeout
values for read and write operations. */
csd->taac = GET_BITS(resp, 112, 8);
csd->nsac = GET_BITS(resp, 104, 8);
csd->tran_speed = GET_BITS(resp, 96, 8);
csd->card_cmd_class = GET_BITS(resp, 84, 12);
csd->rd_blk_len = GET_BITS(resp, 80, 4);
csd->rd_blk_part = GET_BITS(resp, 79, 1);
csd->wr_blk_misalign = GET_BITS(resp, 78, 1);
csd->rd_blk_misalign = GET_BITS(resp, 77, 1);
csd->dsr_imp = GET_BITS(resp, 76, 1);
csd->c_size = GET_BITS(resp, 48, 22);
csd->r2w_factor = GET_BITS(resp, 26, 3);
csd->wr_blk_len = GET_BITS(resp, 22, 4);
csd->wr_blk_partial = GET_BITS(resp, 21, 1);
csd->csd_crc = GET_BITS(resp, 1, 7);
card->card_blksize = 512;
card->card_capacity = (csd->c_size + 1) * 512; /* unit:KB */
card->card_sec_cnt = card->card_capacity * 2;
card->tacc_clks = 0;
card->tacc_ns = 0;
card->max_data_rate = tran_unit[csd->tran_speed&0x07] * tran_value[(csd->tran_speed&0x78)>>3];
break;
default:
LOG_E("unrecognised CSD structure version %d!", csd->csd_structure);
return -RT_ERROR;
}
LOG_I("SD card capacity %d KB.", card->card_capacity);
return 0;
}
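/*
 * Worked example (illustrative): a CSD v2.0 (SDHC/SDXC) card with
 * C_SIZE == 61055 reports (61055 + 1) * 512 KB == 31,260,672 KB of
 * capacity (roughly a "32 GB" card) and 62,521,344 sectors of 512 bytes.
 */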
static rt_int32_t mmcsd_parse_scr(struct rt_mmcsd_card *card)
{
struct rt_sd_scr *scr = &card->scr;
rt_uint32_t resp[4];
resp[3] = card->resp_scr[1];
resp[2] = card->resp_scr[0];
scr->sd_version = GET_BITS(resp, 56, 4);
scr->sd_bus_widths = GET_BITS(resp, 48, 4);
return 0;
}
static rt_int32_t mmcsd_switch(struct rt_mmcsd_card *card)
{
rt_int32_t err;
struct rt_mmcsd_host *host = card->host;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
rt_uint8_t *buf;
buf = (rt_uint8_t*)rt_malloc(64);
if (!buf)
{
LOG_E("alloc memory failed!");
return -RT_ENOMEM;
}
if (card->card_type != CARD_TYPE_SD)
goto err;
if (card->scr.sd_version < SCR_SPEC_VER_1)
goto err;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_SWITCH;
cmd.arg = 0x00FFFFF1;
cmd.flags = RESP_R1 | CMD_ADTC;
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
mmcsd_set_data_timeout(&data, card);
data.blksize = 64;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = (rt_uint32_t *)buf;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
req.cmd = &cmd;
req.data = &data;
mmcsd_send_request(host, &req);
if (cmd.err || data.err)
{
goto err1;
}
if (buf[13] & 0x02)
card->hs_max_data_rate = 50000000;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_SWITCH;
rt_uint32_t switch_func_timing;
if ((card->flags & CARD_FLAG_SDR104) && (card->host->flags & MMCSD_SUP_SDR104))
{
switch_func_timing = SD_SWITCH_FUNC_TIMING_SDR104;
}
else if ((card->flags & CARD_FLAG_SDR50) && (card->host->flags & MMCSD_SUP_SDR50))
{
switch_func_timing = SD_SWITCH_FUNC_TIMING_SDR50;
}
else if ((card->flags & CARD_FLAG_DDR50) && (card->host->flags & MMCSD_SUP_DDR50))
{
switch_func_timing = SD_SWITCH_FUNC_TIMING_DDR50;
}
else
{
switch_func_timing = SD_SWITCH_FUNC_TIMING_HS;
}
cmd.arg = 0x80FFFFF0 | switch_func_timing;
cmd.flags = RESP_R1 | CMD_ADTC;
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
mmcsd_set_data_timeout(&data, card);
data.blksize = 64;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = (rt_uint32_t *)buf;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
req.cmd = &cmd;
req.data = &data;
mmcsd_send_request(host, &req);
if (cmd.err || data.err)
{
goto err1;
}
if ((buf[16] & 0xF) != switch_func_timing)
{
LOG_E("switching card to timing mode %d failed!", switch_func_timing);
goto err;
}
switch(switch_func_timing)
{
case SD_SWITCH_FUNC_TIMING_SDR104:
card->flags |= CARD_FLAG_SDR104;
break;
case SD_SWITCH_FUNC_TIMING_SDR50:
card->flags |= CARD_FLAG_SDR50;
break;
case SD_SWITCH_FUNC_TIMING_DDR50:
card->flags |= CARD_FLAG_DDR50;
break;
case SD_SWITCH_FUNC_TIMING_HS:
card->flags |= CARD_FLAG_HIGHSPEED;
break;
default:
/* Default speed */
break;
}
card->max_data_rate = 50000000;
if (switch_func_timing == SD_SWITCH_FUNC_TIMING_SDR104)
{
LOG_I("sd: switch to SDR104 mode\n");
mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_SDR104);
mmcsd_set_clock(card->host, 208000000);
err = mmcsd_excute_tuning(card);
card->max_data_rate = 208000000;
}
else if (switch_func_timing == SD_SWITCH_FUNC_TIMING_SDR50)
{
LOG_I("sd: switch to SDR50 mode\n");
mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_SDR50);
mmcsd_set_clock(card->host, 100000000);
err = mmcsd_excute_tuning(card);
card->max_data_rate = 100000000;
}
else if (switch_func_timing == SD_SWITCH_FUNC_TIMING_DDR50)
{
LOG_I("sd: switch to DDR50 mode\n");
mmcsd_set_timing(card->host, MMCSD_TIMING_UHS_DDR50);
mmcsd_set_clock(card->host, 50000000);
}
else
{
LOG_I("sd: switch to High Speed / SDR25 mode \n");
mmcsd_set_timing(card->host, MMCSD_TIMING_SD_HS);
mmcsd_set_clock(card->host, 50000000);
}
err:
rt_free(buf);
return 0;
err1:
if (cmd.err)
err = cmd.err;
if (data.err)
err = data.err;
/* do not leak the transfer buffer on the error path */
rt_free(buf);
return err;
}
static rt_err_t mmcsd_app_cmd(struct rt_mmcsd_host *host,
struct rt_mmcsd_card *card)
{
rt_err_t err;
struct rt_mmcsd_cmd cmd = {0};
cmd.cmd_code = APP_CMD;
if (card)
{
cmd.arg = card->rca << 16;
cmd.flags = RESP_R1 | CMD_AC;
}
else
{
cmd.arg = 0;
cmd.flags = RESP_R1 | CMD_BCR;
}
err = mmcsd_send_cmd(host, &cmd, 0);
if (err)
return err;
/* Check that card supported application commands */
if (!controller_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
return -RT_ERROR;
return RT_EOK;
}
rt_err_t mmcsd_send_app_cmd(struct rt_mmcsd_host *host,
struct rt_mmcsd_card *card,
struct rt_mmcsd_cmd *cmd,
int retry)
{
struct rt_mmcsd_req req;
int i;
rt_err_t err;
err = -RT_ERROR;
/*
* We have to resend MMC_APP_CMD for each attempt so
* we cannot use the retries field in mmc_command.
*/
for (i = 0; i <= retry; i++)
{
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
err = mmcsd_app_cmd(host, card);
if (err)
{
/* no point in retrying; no APP commands allowed */
if (controller_is_spi(host))
{
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
break;
}
continue;
}
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(cmd->resp, 0, sizeof(cmd->resp));
req.cmd = cmd;
//cmd->data = NULL;
mmcsd_send_request(host, &req);
err = cmd->err;
if (!cmd->err)
break;
/* no point in retrying illegal APP commands */
if (controller_is_spi(host))
{
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
break;
}
}
return err;
}
rt_err_t mmcsd_app_set_bus_width(struct rt_mmcsd_card *card, rt_int32_t width)
{
rt_err_t err;
struct rt_mmcsd_cmd cmd;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_APP_SET_BUS_WIDTH;
cmd.flags = RESP_R1 | CMD_AC;
switch (width)
{
case MMCSD_BUS_WIDTH_1:
cmd.arg = MMCSD_BUS_WIDTH_1;
break;
case MMCSD_BUS_WIDTH_4:
cmd.arg = MMCSD_BUS_WIDTH_4;
break;
default:
return -RT_ERROR;
}
err = mmcsd_send_app_cmd(card->host, card, &cmd, 3);
if (err)
return err;
return RT_EOK;
}
rt_err_t mmcsd_send_app_op_cond(struct rt_mmcsd_host *host,
rt_uint32_t ocr,
rt_uint32_t *rocr)
{
struct rt_mmcsd_cmd cmd;
rt_uint32_t i;
rt_err_t err = RT_EOK;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_APP_OP_COND;
if (controller_is_spi(host))
cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
else
cmd.arg = ocr;
cmd.flags = RESP_SPI_R1 | RESP_R3 | CMD_BCR;
for (i = 1000; i; i--)
{
err = mmcsd_send_app_cmd(host, RT_NULL, &cmd, 3);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (controller_is_spi(host))
{
if (!(cmd.resp[0] & R1_SPI_IDLE))
break;
}
else
{
if (cmd.resp[0] & CARD_BUSY)
break;
}
err = -RT_ETIMEOUT;
rt_thread_mdelay(10); //delay 10ms
}
if (rocr && !controller_is_spi(host))
*rocr = cmd.resp[0];
return err;
}
/*
* To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
* before SD_APP_OP_COND. This command will harmlessly fail for
* SD 1.0 cards.
*/
rt_err_t mmcsd_send_if_cond(struct rt_mmcsd_host *host, rt_uint32_t ocr)
{
struct rt_mmcsd_cmd cmd;
rt_err_t err;
rt_uint8_t pattern;
cmd.cmd_code = SD_SEND_IF_COND;
cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | 0xAA;
cmd.flags = RESP_SPI_R7 | RESP_R7 | CMD_BCR;
err = mmcsd_send_cmd(host, &cmd, 0);
if (err)
return err;
if (controller_is_spi(host))
pattern = cmd.resp[1] & 0xFF;
else
pattern = cmd.resp[0] & 0xFF;
if (pattern != 0xAA)
return -RT_ERROR;
return RT_EOK;
}
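/*
 * Example: for a host OCR covering 2.7-3.6 V the check (ocr & 0xFF8000)
 * is non-zero, so the CMD8 argument becomes 0x1AA: VHS = 0x1 (2.7-3.6 V)
 * plus the 0xAA check pattern that the card must echo back.
 */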
rt_err_t mmcsd_get_card_addr(struct rt_mmcsd_host *host, rt_uint32_t *rca)
{
rt_err_t err;
struct rt_mmcsd_cmd cmd;
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
cmd.cmd_code = SD_SEND_RELATIVE_ADDR;
cmd.arg = 0;
cmd.flags = RESP_R6 | CMD_BCR;
err = mmcsd_send_cmd(host, &cmd, 3);
if (err)
return err;
*rca = cmd.resp[0] >> 16;
return RT_EOK;
}
#define be32_to_cpu(x) ((rt_uint32_t)( \
(((rt_uint32_t)(x) & (rt_uint32_t)0x000000ffUL) << 24) | \
(((rt_uint32_t)(x) & (rt_uint32_t)0x0000ff00UL) << 8) | \
(((rt_uint32_t)(x) & (rt_uint32_t)0x00ff0000UL) >> 8) | \
(((rt_uint32_t)(x) & (rt_uint32_t)0xff000000UL) >> 24)))
rt_int32_t mmcsd_get_scr(struct rt_mmcsd_card *card, rt_uint32_t *scr)
{
rt_int32_t err;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
err = mmcsd_app_cmd(card->host, card);
if (err)
return err;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.cmd_code = SD_APP_SEND_SCR;
cmd.arg = 0;
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = 8;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = scr;
mmcsd_set_data_timeout(&data, card);
mmcsd_send_request(card->host, &req);
if (cmd.err)
return cmd.err;
if (data.err)
return data.err;
scr[0] = be32_to_cpu(scr[0]);
scr[1] = be32_to_cpu(scr[1]);
return 0;
}
static rt_err_t mmcsd_read_sd_status(struct rt_mmcsd_card *card, rt_uint32_t *sd_status)
{
rt_int32_t err;
struct rt_mmcsd_req req;
struct rt_mmcsd_cmd cmd;
struct rt_mmcsd_data data;
err = mmcsd_app_cmd(card->host, card);
if (err)
return err;
rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
req.cmd = &cmd;
req.data = &data;
cmd.cmd_code = SEND_STATUS;
cmd.arg = 0;
cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
data.blksize = 64;
data.blks = 1;
data.flags = DATA_DIR_READ;
data.buf = sd_status;
mmcsd_set_data_timeout(&data, card);
mmcsd_send_request(card->host, &req);
if (cmd.err)
return cmd.err;
if (data.err)
return data.err;
/* Convert endian */
for (uint32_t i=0; i < 8; i++)
{
uint32_t tmp = sd_status[i];
sd_status[i] = sd_status[15 - i];
sd_status[15 - i] = tmp;
}
for (uint32_t i=0; i < 16; i++)
{
sd_status[i] = be32_to_cpu(sd_status[i]);
}
return 0;
}
static rt_err_t sd_switch_voltage(struct rt_mmcsd_host *host)
{
rt_err_t err;
struct rt_mmcsd_cmd cmd = { 0 };
cmd.cmd_code = VOLTAGE_SWITCH;
cmd.arg = 0;
cmd.flags = RESP_R1 | CMD_AC;
err = mmcsd_send_cmd(host, &cmd, 0);
if (err)
return err;
return RT_EOK;
}
static rt_err_t sd_switch_uhs_voltage(struct rt_mmcsd_host *host)
{
if (host->ops->switch_uhs_voltage != RT_NULL)
{
return host->ops->switch_uhs_voltage(host);
}
return -ENOSYS;
}
static rt_int32_t mmcsd_sd_init_card(struct rt_mmcsd_host *host,
rt_uint32_t ocr)
{
struct rt_mmcsd_card *card;
rt_int32_t err;
rt_uint32_t resp[4];
rt_uint32_t max_data_rate;
mmcsd_go_idle(host);
/*
* If SD_SEND_IF_COND indicates an SD 2.0
* compliant card, set bit 30 of the OCR
* to indicate that we can handle
* block-addressed SDHC cards.
*/
err = mmcsd_send_if_cond(host, ocr);
if (!err)
ocr |= 1 << 30;
/* Switch to UHS voltage if both Host and the Card support this feature */
if (((host->valid_ocr & VDD_165_195) != 0) && (host->ops->switch_uhs_voltage != RT_NULL))
{
ocr |= OCR_S18R;
}
err = mmcsd_send_app_op_cond(host, ocr, &ocr);
if (err)
goto err2;
/* Select voltage */
if (ocr & OCR_S18R)
{
ocr = VDD_165_195;
err = sd_switch_voltage(host);
if (err)
goto err2;
err = sd_switch_uhs_voltage(host);
if (err)
goto err2;
}
if (controller_is_spi(host))
err = mmcsd_get_cid(host, resp);
else
err = mmcsd_all_get_cid(host, resp);
if (err)
goto err2;
card = rt_malloc(sizeof(struct rt_mmcsd_card));
if (!card)
{
LOG_E("malloc card failed!");
err = -RT_ENOMEM;
goto err2;
}
rt_memset(card, 0, sizeof(struct rt_mmcsd_card));
card->card_type = CARD_TYPE_SD;
card->host = host;
rt_memcpy(card->resp_cid, resp, sizeof(card->resp_cid));
    /*
     * For native buses: get the card's RCA and leave open-drain mode.
     */
if (!controller_is_spi(host))
{
err = mmcsd_get_card_addr(host, &card->rca);
if (err)
goto err1;
mmcsd_set_bus_mode(host, MMCSD_BUSMODE_PUSHPULL);
}
err = mmcsd_get_csd(card, card->resp_csd);
if (err)
goto err1;
err = mmcsd_parse_csd(card);
if (err)
goto err1;
if (!controller_is_spi(host))
{
err = mmcsd_select_card(card);
if (err)
goto err1;
}
err = mmcsd_get_scr(card, card->resp_scr);
if (err)
goto err1;
mmcsd_parse_scr(card);
if (controller_is_spi(host))
{
err = mmcsd_spi_use_crc(host, 1);
if (err)
goto err1;
}
mmcsd_set_timing(host, MMCSD_TIMING_LEGACY);
mmcsd_set_clock(host, 25000000);
/*switch bus width*/
if ((host->flags & MMCSD_BUSWIDTH_4) && (card->scr.sd_bus_widths & SD_SCR_BUS_WIDTH_4))
{
err = mmcsd_app_set_bus_width(card, MMCSD_BUS_WIDTH_4);
if (err)
goto err1;
mmcsd_set_bus_width(host, MMCSD_BUS_WIDTH_4);
}
/* Read and decode SD Status and check whether UHS mode is supported */
union rt_sd_status sd_status;
err = mmcsd_read_sd_status(card, sd_status.status_words);
if (err)
goto err1;
if ((sd_status.uhs_speed_grade > 0) && (ocr & VDD_165_195))
{
        /*
         * Assume the card supports all UHS-I modes, since we could not find
         * any mainstream card that supports only a subset of them.
         */
card->flags |= CARD_FLAG_SDR50 | CARD_FLAG_SDR104 | CARD_FLAG_DDR50;
}
/*
* change SD card to the highest supported speed
*/
err = mmcsd_switch(card);
if (err)
goto err1;
/* set bus speed */
max_data_rate = 0U;
if (max_data_rate < card->hs_max_data_rate)
{
max_data_rate = card->hs_max_data_rate;
}
if (max_data_rate < card->max_data_rate)
{
max_data_rate = card->max_data_rate;
}
mmcsd_set_clock(host, max_data_rate);
host->card = card;
return 0;
err1:
rt_free(card);
err2:
return err;
}
/*
* Starting point for SD card init.
*/
rt_int32_t init_sd(struct rt_mmcsd_host *host, rt_uint32_t ocr)
{
rt_int32_t err = -RT_EINVAL;
rt_uint32_t current_ocr;
/*
* We need to get OCR a different way for SPI.
*/
if (controller_is_spi(host))
{
mmcsd_go_idle(host);
err = mmcsd_spi_read_ocr(host, 0, &ocr);
if (err)
goto _err;
}
current_ocr = mmcsd_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
if (!current_ocr)
{
err = -RT_ERROR;
goto _err;
}
/*
* Detect and init the card.
*/
err = mmcsd_sd_init_card(host, current_ocr);
if (err)
goto _err;
mmcsd_host_unlock(host);
err = rt_mmcsd_blk_probe(host->card);
if (err)
goto remove_card;
mmcsd_host_lock(host);
return 0;
remove_card:
mmcsd_host_lock(host);
rt_mmcsd_blk_remove(host->card);
rt_free(host->card);
host->card = RT_NULL;
_err:
LOG_D("init SD card failed!");
return err;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,320 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#include <rtthread.h>
#include "sdhci.h"
#include <rtdbg.h>
#include <mmu.h>
#include <drivers/core/dm.h>
static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
rt_uint32_t flags = req->cmd->flags;
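    /*
     * Translate the RT-Thread RESP_* response type into the Linux-style
     * MMC_RSP_* flag bits that the SDHCI core underneath expects.
     */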
switch (flags & RESP_MASK)
{
case RESP_NONE:
flags |= MMC_RSP_NONE;
break;
case RESP_R1:
flags |= MMC_RSP_R1;
break;
case RESP_R1B:
flags |= MMC_RSP_R1B;
break;
case RESP_R2:
flags |= MMC_RSP_R2;
break;
case RESP_R3:
flags |= MMC_RSP_R3;
break;
case RESP_R4:
flags |= MMC_RSP_R4;
break;
case RESP_R5:
flags |= MMC_RSP_R5;
break;
case RESP_R6:
flags |= MMC_RSP_R6;
break;
case RESP_R7:
flags |= MMC_RSP_R7;
break;
}
if (req->data)
{
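        /*
         * If the buffer's physical address does not fit in 32 bits, stage the
         * transfer through a bounce buffer, since this path presumably drives
         * the controller with 32-bit DMA addresses; the bounce buffer is
         * assumed to come from DMA-reachable (low) heap memory.
         */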
if ((rt_uint64_t)rt_kmem_v2p(req->data->buf) > 0xffffffff)
{
void *dma_buffer = rt_malloc(req->data->blks * req->data->blksize);
void *req_buf = NULL;
if (req->data->flags & DATA_DIR_WRITE)
{
rt_memcpy(dma_buffer, req->data->buf, req->data->blks * req->data->blksize);
req_buf = req->data->buf;
req->data->buf = dma_buffer;
}
else if (req->data->flags & DATA_DIR_READ)
{
req_buf = req->data->buf;
req->data->buf = dma_buffer;
}
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);
if (req->data->flags & DATA_DIR_READ)
{
rt_memcpy(req_buf, dma_buffer, req->data->blksize * req->data->blks);
req->data->buf = req_buf;
            }
            else
            {
                req->data->buf = req_buf;
            }
rt_free(dma_buffer);
rt_sem_release(&host->sem_ack);
}
else
{
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
}
}
else
{
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
}
}
static void rt_plat_set_ioconfig(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *iocfg)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
LOG_D("clock:%d,width:%d,power:%d,vdd:%d,timing:%d\n",
iocfg->clock, iocfg->bus_width,
iocfg->power_mode, iocfg->vdd, iocfg->timing);
mmc->ops->set_ios(mmc, iocfg);
}
static rt_int32_t rt_plat_get_card_status(struct rt_mmcsd_host *host)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
return mmc->ops->get_cd(mmc);
}
static rt_int32_t rt_plat_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
return mmc->ops->execute_tuning(mmc, opcode);
}
static void rt_plat_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
return mmc->ops->enable_sdio_irq(mmc, en);
}
static const struct rt_mmcsd_host_ops rt_mmcsd_ops = {
.request = rt_plat_request,
.set_iocfg = rt_plat_set_ioconfig,
.get_card_status = rt_plat_get_card_status,
.enable_sdio_irq = rt_plat_enable_sdio_irq,
.execute_tuning = rt_plat_execute_tuning,
};
void rt_mmc_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq)
{
mmcsd_req_complete(&host->rthost);
}
/* Register the host with the RT-Thread MMC/SD core once SDHCI setup is complete */
int rt_mmc_add_host(struct rt_mmc_host *mmc)
{
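    /*
     * Copy the capability and transfer-limit fields of the Linux-style
     * rt_mmc_host into the embedded RT-Thread host, then ask the MMC/SD
     * core to rescan it via mmcsd_change().
     */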
mmc->rthost.ops = &rt_mmcsd_ops;
mmc->rthost.flags = mmc->caps;
mmc->rthost.freq_max = mmc->f_max;
mmc->rthost.freq_min = 400000;
mmc->rthost.max_dma_segs = mmc->max_segs;
mmc->rthost.max_seg_size = mmc->max_seg_size;
mmc->rthost.max_blk_size = mmc->max_blk_size;
mmc->rthost.max_blk_count = mmc->max_blk_count;
    mmc->rthost.valid_ocr = VDD_165_195 | VDD_20_21 | VDD_21_22 | VDD_22_23 |
                            VDD_24_25 | VDD_25_26 | VDD_26_27 | VDD_27_28 |
                            VDD_28_29 | VDD_29_30 | VDD_30_31 | VDD_32_33 |
                            VDD_33_34 | VDD_34_35 | VDD_35_36;
mmcsd_change(&mmc->rthost);
return 0;
}
struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *dev)
{
struct rt_mmc_host *mmc;
mmc = rt_malloc(sizeof(*mmc) + extra);
if (mmc)
{
rt_memset(mmc, 0, sizeof(*mmc) + extra);
mmc->parent = dev;
mmcsd_host_init(&mmc->rthost);
}
return mmc;
}
void rt_mmc_remove_host(struct rt_mmc_host *host)
{
rt_free(host);
}
int rt_mmc_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode)
{
return 0;
}
int rt_mmc_gpio_get_cd(struct rt_mmc_host *host)
{
return -ENOSYS;
}
void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay)
{
}
int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
return 0;
}
rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host)
{
return RT_FALSE;
}
int rt_mmc_gpio_get_ro(struct rt_mmc_host *host)
{
return 0;
}
int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode)
{
return 0;
}
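/*
 * Parse generic MMC bindings from the host's devicetree node.
 * An illustrative node (names and values are examples only):
 *
 *   mmc@fe310000 {
 *       bus-width = <4>;
 *       max-frequency = <50000000>;
 *       cap-mmc-highspeed;
 *       non-removable;
 *   };
 */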
int rt_mmc_of_parse(struct rt_mmc_host *host)
{
struct rt_device *dev = host->parent;
rt_uint32_t bus_width;
if (!dev || !dev->ofw_node)
return 0;
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
if (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) < 0)
{
bus_width = 1;
}
switch (bus_width)
{
    case 8:
        /* Hosts capable of 8-bit transfers can also do 4-bit transfers */
        host->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
        break;
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
case 1:
break;
default:
return -EINVAL;
}
/* f_max is obtained from the optional "max-frequency" property */
rt_dm_dev_prop_read_u32(dev, "max-frequency", &host->f_max);
if (rt_dm_dev_prop_read_bool(dev, "cap-mmc-highspeed"))
{
host->caps |= MMC_CAP_MMC_HIGHSPEED;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-hs200-1_8v"))
{
host->caps |= MMC_CAP2_HS200_1_8V_SDR;
}
if (rt_dm_dev_prop_read_bool(dev, "non-removable"))
{
host->caps |= MMC_CAP_NONREMOVABLE;
}
if (rt_dm_dev_prop_read_bool(dev, "no-sdio"))
{
host->caps2 |= MMC_CAP2_NO_SDIO;
}
if (rt_dm_dev_prop_read_bool(dev, "no-sd"))
{
host->caps2 |= MMC_CAP2_NO_SD;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-3_3v"))
{
host->caps |= MMC_CAP_3_3V_DDR;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_8v"))
{
host->caps |= MMC_CAP_1_8V_DDR;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_2v"))
{
host->caps |= MMC_CAP_1_2V_DDR;
}
return 0;
}
void rt_mmc_free_host(struct rt_mmc_host *host)
{
}
rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host)
{
return RT_FALSE;
}
int mmc_regulator_get_supply(struct rt_mmc_host *mmc)
{
    mmc->supply.vmmc = RT_NULL;
    mmc->supply.vqmmc = RT_NULL;
return 0;
}
int regulator_get_current_limit(struct regulator *regulator)
{
return 0;
}
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
return 0;
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef _DRIVERS_MMC_RT_SDHCI_PLTFM_H
#define _DRIVERS_MMC_RT_SDHCI_PLTFM_H
#include <rtthread.h>
#include <drivers/core/dm.h>
#include <drivers/ofw.h>
#include <drivers/platform.h>
#include <drivers/clk.h>
#include "sdhci.h"
struct rt_sdhci_pltfm_data
{
const struct rt_sdhci_ops *ops;
unsigned int quirks;
unsigned int quirks2;
};
struct rt_sdhci_pltfm_host
{
struct rt_clk *clk;
unsigned int clock;
rt_uint64_t xfer_mode_shadow;
unsigned long private[];
};
void rt_sdhci_get_property(struct rt_platform_device *pdev);
static inline void sdhci_get_of_property(struct rt_platform_device *pdev)
{
return rt_sdhci_get_property(pdev);
}
extern struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size);
extern void rt_sdhci_pltfm_free(struct rt_platform_device *pdev);
extern int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size);
extern void rt_sdhci_pltfm_remove(struct rt_platform_device *pdev);
extern unsigned int rt_sdhci_pltfm_clk_get_max_clock(struct rt_sdhci_host *host);
static inline void *sdhci_pltfm_priv(struct rt_sdhci_pltfm_host *host)
{
return host->private;
}
static inline int sdhci_pltfm_suspend(struct rt_device *dev)
{
return 0;
}
static inline int sdhci_pltfm_resume(struct rt_device *dev)
{
return 0;
}
#endif

View File

@@ -0,0 +1,677 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_HW_H
#define __RT_SDHCI_HW_H
#include "sdhci_host.h"
#include "sdhci_misc.h"
#include "sdhci-platform.h"
#include <drivers/mmcsd_cmd.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/mmcsd_host.h>
#include <rtdevice.h>
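/*
 * Split a 64-bit value into its 32-bit halves; the double shift in
 * upper_32_bits() avoids undefined behaviour when the argument happens to be
 * only 32 bits wide.
 */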
#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define MAX_TUNING_LOOP 40
/*
* Controller registers
*/
#define RT_SDHCI_DMA_ADDRESS 0x00
#define RT_SDHCI_ARGUMENT2 RT_SDHCI_DMA_ADDRESS
#define RT_SDHCI_32BIT_BLK_CNT RT_SDHCI_DMA_ADDRESS
#define RT_SDHCI_BLOCK_SIZE 0x04
#define RT_SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
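/* RT_SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary (bits 14:12) and the transfer block size (bits 11:0) */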
#define RT_SDHCI_BLOCK_COUNT 0x06
#define RT_SDHCI_ARGUMENT 0x08
#define RT_SDHCI_TRANSFER_MODE 0x0C
#define RT_SDHCI_TRNS_DMA 0x01
#define RT_SDHCI_TRNS_BLK_CNT_EN 0x02
#define RT_SDHCI_TRNS_AUTO_CMD12 0x04
#define RT_SDHCI_TRNS_AUTO_CMD23 0x08
#define RT_SDHCI_TRNS_AUTO_SEL 0x0C
#define RT_SDHCI_TRNS_READ 0x10
#define RT_SDHCI_TRNS_MULTI 0x20
#define RT_SDHCI_COMMAND 0x0E
#define RT_SDHCI_CMD_RESP_MASK 0x03
#define RT_SDHCI_CMD_CRC 0x08
#define RT_SDHCI_CMD_INDEX 0x10
#define RT_SDHCI_CMD_DATA 0x20
#define RT_SDHCI_CMD_ABORTCMD 0xC0
#define RT_SDHCI_CMD_RESP_NONE 0x00
#define RT_SDHCI_CMD_RESP_LONG 0x01
#define RT_SDHCI_CMD_RESP_SHORT 0x02
#define RT_SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define RT_SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
#define RT_SDHCI_GET_CMD(c) ((c >> 8) & 0x3f)
#define RT_SDHCI_RESPONSE 0x10
#define RT_SDHCI_BUFFER 0x20
#define RT_SDHCI_PRESENT_STATE 0x24
#define RT_SDHCI_CMD_INHIBIT 0x00000001
#define RT_SDHCI_DATA_INHIBIT 0x00000002
#define RT_SDHCI_DOING_WRITE 0x00000100
#define RT_SDHCI_DOING_READ 0x00000200
#define RT_SDHCI_SPACE_AVAILABLE 0x00000400
#define RT_SDHCI_DATA_AVAILABLE 0x00000800
#define RT_SDHCI_CARD_PRESENT 0x00010000
#define RT_SDHCI_CARD_PRES_SHIFT 16
#define RT_SDHCI_CD_STABLE 0x00020000
#define RT_SDHCI_CD_LVL 0x00040000
#define RT_SDHCI_CD_LVL_SHIFT 18
#define RT_SDHCI_WRITE_PROTECT 0x00080000
#define RT_SDHCI_DATA_LVL_MASK 0x00F00000
#define RT_SDHCI_DATA_LVL_SHIFT 20
#define RT_SDHCI_DATA_0_LVL_MASK 0x00100000
#define RT_SDHCI_CMD_LVL 0x01000000
#define RT_SDHCI_HOST_CONTROL 0x28
#define RT_SDHCI_CTRL_LED 0x01
#define RT_SDHCI_CTRL_4BITBUS 0x02
#define RT_SDHCI_CTRL_HISPD 0x04
#define RT_SDHCI_CTRL_DMA_MASK 0x18
#define RT_SDHCI_CTRL_SDMA 0x00
#define RT_SDHCI_CTRL_ADMA1 0x08
#define RT_SDHCI_CTRL_ADMA32 0x10
#define RT_SDHCI_CTRL_ADMA64 0x18
#define RT_SDHCI_CTRL_ADMA3 0x18
#define RT_SDHCI_CTRL_8BITBUS 0x20
#define RT_SDHCI_CTRL_CDTEST_INS 0x40
#define RT_SDHCI_CTRL_CDTEST_EN 0x80
#define RT_SDHCI_POWER_CONTROL 0x29
#define RT_SDHCI_POWER_ON 0x01
#define RT_SDHCI_POWER_180 0x0A
#define RT_SDHCI_POWER_300 0x0C
#define RT_SDHCI_POWER_330 0x0E
/*
* VDD2 - UHS2 or PCIe/NVMe
* VDD2 power on/off and voltage select
*/
#define RT_SDHCI_VDD2_POWER_ON 0x10
#define RT_SDHCI_VDD2_POWER_120 0x80
#define RT_SDHCI_VDD2_POWER_180 0xA0
#define RT_SDHCI_BLOCK_GAP_CONTROL 0x2A
#define RT_SDHCI_WAKE_UP_CONTROL 0x2B
#define RT_SDHCI_WAKE_ON_INT 0x01
#define RT_SDHCI_WAKE_ON_INSERT 0x02
#define RT_SDHCI_WAKE_ON_REMOVE 0x04
#define RT_SDHCI_CLOCK_CONTROL 0x2C
#define RT_SDHCI_DIVIDER_SHIFT 8
#define RT_SDHCI_DIVIDER_HI_SHIFT 6
#define RT_SDHCI_DIV_MASK 0xFF
#define RT_SDHCI_DIV_MASK_LEN 8
#define RT_SDHCI_DIV_HI_MASK 0x300
#define RT_SDHCI_PROG_CLOCK_MODE 0x0020
#define RT_SDHCI_CLOCK_CARD_EN 0x0004
#define RT_SDHCI_CLOCK_PLL_EN 0x0008
#define RT_SDHCI_CLOCK_INT_STABLE 0x0002
#define RT_SDHCI_CLOCK_INT_EN 0x0001
#define RT_SDHCI_TIMEOUT_CONTROL 0x2E
#define RT_SDHCI_SOFTWARE_RESET 0x2F
#define RT_SDHCI_RESET_ALL 0x01
#define RT_SDHCI_RESET_CMD 0x02
#define RT_SDHCI_RESET_DATA 0x04
#define RT_SDHCI_INT_STATUS 0x30
#define RT_SDHCI_INT_ENABLE 0x34
#define RT_SDHCI_SIGNAL_ENABLE 0x38
#define RT_SDHCI_INT_RESPONSE 0x00000001
#define RT_SDHCI_INT_DATA_END 0x00000002
#define RT_SDHCI_INT_BLK_GAP 0x00000004
#define RT_SDHCI_INT_DMA_END 0x00000008
#define RT_SDHCI_INT_SPACE_AVAIL 0x00000010
#define RT_SDHCI_INT_DATA_AVAIL 0x00000020
#define RT_SDHCI_INT_CARD_INSERT 0x00000040
#define RT_SDHCI_INT_CARD_REMOVE 0x00000080
#define RT_SDHCI_INT_CARD_INT 0x00000100
#define RT_SDHCI_INT_RETUNE 0x00001000
#define RT_SDHCI_INT_CQE 0x00004000
#define RT_SDHCI_INT_ERROR 0x00008000
#define RT_SDHCI_INT_TIMEOUT 0x00010000
#define RT_SDHCI_INT_CRC 0x00020000
#define RT_SDHCI_INT_END_BIT 0x00040000
#define RT_SDHCI_INT_INDEX 0x00080000
#define RT_SDHCI_INT_DATA_TIMEOUT 0x00100000
#define RT_SDHCI_INT_DATA_CRC 0x00200000
#define RT_SDHCI_INT_DATA_END_BIT 0x00400000
#define RT_SDHCI_INT_BUS_POWER 0x00800000
#define RT_SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define RT_SDHCI_INT_ADMA_ERROR 0x02000000
#define RT_SDHCI_INT_NORMAL_MASK 0x00007FFF
#define RT_SDHCI_INT_ERROR_MASK 0xFFFF8000
#define RT_SDHCI_INT_CMD_MASK (RT_SDHCI_INT_RESPONSE | RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_AUTO_CMD_ERR)
#define RT_SDHCI_INT_DATA_MASK (RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_DMA_END | RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BLK_GAP)
#define RT_SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define RT_SDHCI_CQE_INT_ERR_MASK ( \
RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT)
#define RT_SDHCI_CQE_INT_MASK (RT_SDHCI_CQE_INT_ERR_MASK | RT_SDHCI_INT_CQE)
#define RT_SDHCI_AUTO_CMD_STATUS 0x3C
#define RT_SDHCI_AUTO_CMD_TIMEOUT 0x00000002
#define RT_SDHCI_AUTO_CMD_CRC 0x00000004
#define RT_SDHCI_AUTO_CMD_END_BIT 0x00000008
#define RT_SDHCI_AUTO_CMD_INDEX 0x00000010
#define RT_SDHCI_HOST_CONTROL2 0x3E
#define RT_SDHCI_CTRL_UHS_MASK 0x0007
#define RT_SDHCI_CTRL_UHS_SDR12 0x0000
#define RT_SDHCI_CTRL_UHS_SDR25 0x0001
#define RT_SDHCI_CTRL_UHS_SDR50 0x0002
#define RT_SDHCI_CTRL_UHS_SDR104 0x0003
#define RT_SDHCI_CTRL_UHS_DDR50 0x0004
#define RT_SDHCI_CTRL_HS400 0x0005 /* Non-standard */
#define RT_SDHCI_CTRL_VDD_180 0x0008
#define RT_SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define RT_SDHCI_CTRL_DRV_TYPE_B 0x0000
#define RT_SDHCI_CTRL_DRV_TYPE_A 0x0010
#define RT_SDHCI_CTRL_DRV_TYPE_C 0x0020
#define RT_SDHCI_CTRL_DRV_TYPE_D 0x0030
#define RT_SDHCI_CTRL_EXEC_TUNING 0x0040
#define RT_SDHCI_CTRL_TUNED_CLK 0x0080
#define RT_SDHCI_CMD23_ENABLE 0x0800
#define RT_SDHCI_CTRL_V4_MODE 0x1000
#define RT_SDHCI_CTRL_64BIT_ADDR 0x2000
#define RT_SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define RT_SDHCI_CAPABILITIES 0x40
#define RT_SDHCI_TIMEOUT_CLK_MASK RT_GENMASK(5, 0)
#define RT_SDHCI_TIMEOUT_CLK_SHIFT 0
#define RT_SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define RT_SDHCI_CLOCK_BASE_MASK RT_GENMASK(13, 8)
#define RT_SDHCI_CLOCK_BASE_SHIFT 8
#define RT_SDHCI_CLOCK_V3_BASE_MASK RT_GENMASK(15, 8)
#define RT_SDHCI_MAX_BLOCK_MASK 0x00030000
#define RT_SDHCI_MAX_BLOCK_SHIFT 16
#define RT_SDHCI_CAN_DO_8BIT 0x00040000
#define RT_SDHCI_CAN_DO_ADMA2 0x00080000
#define RT_SDHCI_CAN_DO_ADMA1 0x00100000
#define RT_SDHCI_CAN_DO_HISPD 0x00200000
#define RT_SDHCI_CAN_DO_SDMA 0x00400000
#define RT_SDHCI_CAN_DO_SUSPEND 0x00800000
#define RT_SDHCI_CAN_VDD_330 0x01000000
#define RT_SDHCI_CAN_VDD_300 0x02000000
#define RT_SDHCI_CAN_VDD_180 0x04000000
#define RT_SDHCI_CAN_64BIT_V4 0x08000000
#define RT_SDHCI_CAN_64BIT 0x10000000
#define RT_SDHCI_CAPABILITIES_1 0x44
#define RT_SDHCI_SUPPORT_SDR50 0x00000001
#define RT_SDHCI_SUPPORT_SDR104 0x00000002
#define RT_SDHCI_SUPPORT_DDR50 0x00000004
#define RT_SDHCI_DRIVER_TYPE_A 0x00000010
#define RT_SDHCI_DRIVER_TYPE_C 0x00000020
#define RT_SDHCI_DRIVER_TYPE_D 0x00000040
#define RT_SDHCI_RETUNING_TIMER_COUNT_MASK RT_GENMASK(11, 8)
#define RT_SDHCI_USE_SDR50_TUNING 0x00002000
#define RT_SDHCI_RETUNING_MODE_MASK RT_GENMASK(15, 14)
#define RT_SDHCI_CLOCK_MUL_MASK RT_GENMASK(23, 16)
#define RT_SDHCI_CAN_DO_ADMA3 0x08000000
#define RT_SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
#define RT_SDHCI_MAX_CURRENT 0x48
#define RT_SDHCI_MAX_CURRENT_LIMIT RT_GENMASK(7, 0)
#define RT_SDHCI_MAX_CURRENT_330_MASK RT_GENMASK(7, 0)
#define RT_SDHCI_MAX_CURRENT_300_MASK RT_GENMASK(15, 8)
#define RT_SDHCI_MAX_CURRENT_180_MASK RT_GENMASK(23, 16)
#define RT_SDHCI_MAX_CURRENT_MULTIPLIER 4
/* 4C-4F reserved for more max current */
#define RT_SDHCI_SET_ACMD12_ERROR 0x50
#define RT_SDHCI_SET_INT_ERROR 0x52
#define RT_SDHCI_ADMA_ERROR 0x54
/* 55-57 reserved */
#define RT_SDHCI_ADMA_ADDRESS 0x58
#define RT_SDHCI_ADMA_ADDRESS_HI 0x5C
/* 60-FB reserved */
#define RT_SDHCI_PRESET_FOR_HIGH_SPEED 0x64
#define RT_SDHCI_PRESET_FOR_SDR12 0x66
#define RT_SDHCI_PRESET_FOR_SDR25 0x68
#define RT_SDHCI_PRESET_FOR_SDR50 0x6A
#define RT_SDHCI_PRESET_FOR_SDR104 0x6C
#define RT_SDHCI_PRESET_FOR_DDR50 0x6E
#define RT_SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
#define RT_SDHCI_PRESET_DRV_MASK RT_GENMASK(15, 14)
#define BIT(nr) ((1) << (nr))
#define RT_SDHCI_PRESET_CLKGEN_SEL BIT(10)
#define RT_SDHCI_PRESET_SDCLK_FREQ_MASK RT_GENMASK(9, 0)
#define RT_SDHCI_SLOT_INT_STATUS 0xFC
#define RT_SDHCI_HOST_VERSION 0xFE
#define RT_SDHCI_VENDOR_VER_MASK 0xFF00
#define RT_SDHCI_VENDOR_VER_SHIFT 8
#define RT_SDHCI_SPEC_VER_MASK 0x00FF
#define RT_SDHCI_SPEC_VER_SHIFT 0
#define RT_SDHCI_SPEC_100 0
#define RT_SDHCI_SPEC_200 1
#define RT_SDHCI_SPEC_300 2
#define RT_SDHCI_SPEC_400 3
#define RT_SDHCI_SPEC_410 4
#define RT_SDHCI_SPEC_420 5
/*
* End of controller registers.
*/
#define RT_SDHCI_MAX_DIV_SPEC_200 256
#define RT_SDHCI_MAX_DIV_SPEC_300 2046
/*
* Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
*/
#define RT_SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
#define ilog2(v) (__rt_ffs(v) - 1) /* __rt_ffs() is 1-based; ilog2() must be 0-based */
#define RT_SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(RT_SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
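/* Boundary argument encoding: 0 = 4 KiB, 1 = 8 KiB, ... 7 = 512 KiB */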
#define RT_SDHCI_MAX_SEGS 128
/* Allow for a command request and a data request at the same time */
#define RT_SDHCI_MAX_MRQS 2
#define MMC_CMD_TRANSFER_TIME (10 * 1000000L) /* max 10 ms */
enum rt_sdhci_cookie
{
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */
};
struct rt_sdhci_host
{
const char *hw_name; /* Hardware bus name */
unsigned int quirks; /* Deviations from spec. */
void *data_buf;
/* Controller doesn't honor resets unless we touch the clock register */
#define RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET (1 << 0)
/* Controller has bad caps bits, but really supports DMA */
#define RT_SDHCI_QUIRK_FORCE_DMA (1 << 1)
/* Controller doesn't like to be reset when there is no card inserted. */
#define RT_SDHCI_QUIRK_NO_CARD_NO_RESET (1 << 2)
/* Controller doesn't like clearing the power reg before a change */
#define RT_SDHCI_QUIRK_SINGLE_POWER_WRITE (1 << 3)
/* Controller has an unusable DMA engine */
#define RT_SDHCI_QUIRK_BROKEN_DMA (1 << 5)
/* Controller has an unusable ADMA engine */
#define RT_SDHCI_QUIRK_BROKEN_ADMA (1 << 6)
/* Controller can only DMA from 32-bit aligned addresses */
#define RT_SDHCI_QUIRK_32BIT_DMA_ADDR (1 << 7)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define RT_SDHCI_QUIRK_32BIT_DMA_SIZE (1 << 8)
/* Controller can only ADMA chunks that are a multiple of 32 bits */
#define RT_SDHCI_QUIRK_32BIT_ADMA_SIZE (1 << 9)
/* Controller needs to be reset after each request to stay stable */
#define RT_SDHCI_QUIRK_RESET_AFTER_REQUEST (1 << 10)
/* Controller needs voltage and power writes to happen separately */
#define RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1 << 11)
/* Controller provides an incorrect timeout value for transfers */
#define RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1 << 12)
/* Controller has an issue with buffer bits for small transfers */
#define RT_SDHCI_QUIRK_BROKEN_SMALL_PIO (1 << 13)
/* Controller does not provide transfer-complete interrupt when not busy */
#define RT_SDHCI_QUIRK_NO_BUSY_IRQ (1 << 14)
/* Controller has unreliable card detection */
#define RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION (1 << 15)
/* Controller reports inverted write-protect state */
#define RT_SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1 << 16)
/* Controller has unusable command queue engine */
#define RT_SDHCI_QUIRK_BROKEN_CQE (1 << 17)
/* Controller does not like fast PIO transfers */
#define RT_SDHCI_QUIRK_PIO_NEEDS_DELAY (1 << 18)
/* Controller does not have a LED */
#define RT_SDHCI_QUIRK_NO_LED (1 << 19)
/* Controller has to be forced to use block size of 2048 bytes */
#define RT_SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1 << 20)
/* Controller cannot do multi-block transfers */
#define RT_SDHCI_QUIRK_NO_MULTIBLOCK (1 << 21)
/* Controller can only handle 1-bit data transfers */
#define RT_SDHCI_QUIRK_FORCE_1_BIT_DATA (1 << 22)
/* Controller needs 10ms delay between applying power and clock */
#define RT_SDHCI_QUIRK_DELAY_AFTER_POWER (1 << 23)
/* Controller uses SDCLK instead of TMCLK for data timeouts */
#define RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1 << 24)
/* Controller reports wrong base clock capability */
#define RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1 << 25)
/* Controller cannot support End Attribute in NOP ADMA descriptor */
#define RT_SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1 << 26)
/* Controller uses Auto CMD12 command to stop the transfer */
#define RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1 << 28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define RT_SDHCI_QUIRK_NO_HISPD_BIT (1 << 29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
#define RT_SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1 << 30)
/* The read-only detection via RT_SDHCI_PRESENT_STATE register is unstable */
#define RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT (1 << 31)
unsigned int quirks2; /* More deviations from spec. */
#define RT_SDHCI_QUIRK2_HOST_OFF_CARD_ON (1 << 0)
#define RT_SDHCI_QUIRK2_HOST_NO_CMD23 (1 << 1)
/* The system physically doesn't support 1.8v, even if the host does */
#define RT_SDHCI_QUIRK2_NO_1_8_V (1 << 2)
#define RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1 << 3)
#define RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1 << 4)
/* Controller has a non-standard host control register */
#define RT_SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1 << 5)
/* Controller does not support HS200 */
#define RT_SDHCI_QUIRK2_BROKEN_HS200 (1 << 6)
/* Controller does not support DDR50 */
#define RT_SDHCI_QUIRK2_BROKEN_DDR50 (1 << 7)
/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
#define RT_SDHCI_QUIRK2_STOP_WITH_TC (1 << 8)
/* Controller does not support 64-bit DMA */
#define RT_SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1 << 9)
/* Need to clear the transfer mode register before sending a command */
#define RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1 << 10)
/* Capability register bit-63 indicates HS400 support */
#define RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1 << 11)
/* forced tuned clock */
#define RT_SDHCI_QUIRK2_TUNING_WORK_AROUND (1 << 12)
/* disable the block count for single block transactions */
#define RT_SDHCI_QUIRK2_SUPPORT_SINGLE (1 << 13)
/* Controller broken with using ACMD23 */
#define RT_SDHCI_QUIRK2_ACMD23_BROKEN (1 << 14)
/* Broken Clock divider zero in controller */
#define RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1 << 15)
/* Controller has CRC in 136 bit Command Response */
#define RT_SDHCI_QUIRK2_RSP_136_HAS_CRC (1 << 16)
#define RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1 << 17)
#define RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1 << 18)
/* Issue CMD and DATA reset together */
#define RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1 << 19)
int irq; /* Device IRQ */
void *ioaddr; /* Mapped address */
char *bounce_buffer; /* For packing SDMA reads/writes */
rt_uint64_t bounce_addr;
unsigned int bounce_buffer_size;
const struct rt_sdhci_ops *ops; /* Low level hw interface */
/* Internal data */
struct rt_mmc_host *mmc; /* MMC structure */
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
rt_uint64_t dma_mask; /* custom DMA mask */
rt_spinlock_t lock;
int flags; /* Host attributes */
#define RT_SDHCI_USE_SDMA (1 << 0) /* Host is SDMA capable */
#define RT_SDHCI_USE_ADMA (1 << 1) /* Host is ADMA capable */
#define RT_SDHCI_REQ_USE_DMA (1 << 2) /* Use DMA for this req. */
#define RT_SDHCI_DEVICE_DEAD (1 << 3) /* Device unresponsive */
#define RT_SDHCI_SDR50_NEEDS_TUNING (1 << 4) /* SDR50 needs tuning */
#define RT_SDHCI_AUTO_CMD12 (1 << 6) /* Auto CMD12 support */
#define RT_SDHCI_AUTO_CMD23 (1 << 7) /* Auto CMD23 support */
#define RT_SDHCI_PV_ENABLED (1 << 8) /* Preset value enabled */
#define RT_SDHCI_USE_64_BIT_DMA (1 << 12) /* Use 64-bit DMA */
#define RT_SDHCI_HS400_TUNING (1 << 13) /* Tuning for HS400 */
#define RT_SDHCI_SIGNALING_330 (1 << 14) /* Host is capable of 3.3V signaling */
#define RT_SDHCI_SIGNALING_180 (1 << 15) /* Host is capable of 1.8V signaling */
#define RT_SDHCI_SIGNALING_120 (1 << 16) /* Host is capable of 1.2V signaling */
unsigned int version; /* RT_SDHCI spec. version */
unsigned int max_clk; /* Max possible freq (MHz) */
unsigned int timeout_clk; /* Timeout freq (KHz) */
rt_uint8_t max_timeout_count; /* Vendor specific max timeout count */
    unsigned int clk_mul; /* Clock multiplier value */
unsigned int clock; /* Current clock (MHz) */
rt_uint8_t pwr; /* Current voltage */
rt_uint8_t drv_type; /* Current UHS-I driver type */
rt_bool_t reinit_uhs; /* Force UHS-related re-initialization */
rt_bool_t runtime_suspended; /* Host is runtime suspended */
rt_bool_t bus_on; /* Bus power prevents runtime suspend */
rt_bool_t preset_enabled; /* Preset is enabled */
rt_bool_t pending_reset; /* Cmd/data reset is pending */
rt_bool_t irq_wake_enabled; /* IRQ wakeup is enabled */
rt_bool_t v4_mode; /* Host Version 4 Enable */
rt_bool_t always_defer_done; /* Always defer to complete requests */
struct rt_mmcsd_req *mrqs_done[RT_SDHCI_MAX_MRQS]; /* Requests done */
struct rt_mmcsd_cmd *cmd; /* Current command */
struct rt_mmcsd_cmd *data_cmd; /* Current data command */
struct rt_mmcsd_cmd *deferred_cmd; /* Deferred command */
struct rt_mmcsd_data *data; /* Current data request */
unsigned int data_early : 1; /* Data finished before cmd */
unsigned int blocks; /* remaining PIO blocks */
size_t align_buffer_sz; /* Bounce buffer size */
rt_uint64_t align_addr; /* Mapped bounce buffer */
struct rt_workqueue *complete_wq; /* Request completion wq */
struct rt_work complete_work; /* Request completion work */
struct rt_workqueue *irq_wq;
struct rt_work irq_work;
struct rt_timer timer; /* Timer for timeouts */
struct rt_timer data_timer; /* Timer for data timeouts */
rt_uint32_t caps; /* CAPABILITY_0 */
rt_uint32_t caps1; /* CAPABILITY_1 */
rt_bool_t read_caps; /* Capability flags have been read */
rt_bool_t sdhci_core_to_disable_vqmmc; /* sdhci core can disable vqmmc */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
unsigned int ocr_avail_mmc;
rt_uint32_t ocr_mask; /* available voltages */
unsigned timing; /* Current timing */
rt_uint32_t thread_isr;
/* cached registers */
rt_uint32_t ier;
rt_bool_t cqe_on; /* CQE is operating */
rt_uint32_t cqe_ier; /* CQE interrupt mask */
rt_uint32_t cqe_err_ier; /* CQE error interrupt mask */
rt_wqueue_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
unsigned int tuning_err; /* Error code for re-tuning */
#define RT_SDHCI_TUNING_MODE_1 0
#define RT_SDHCI_TUNING_MODE_2 1
#define RT_SDHCI_TUNING_MODE_3 2
/* Delay (ms) between tuning commands */
int tuning_delay;
int tuning_loop_count;
/* Host SDMA buffer boundary. */
rt_uint32_t sdma_boundary;
rt_uint64_t data_timeout;
unsigned long private[];
};
static inline rt_uint8_t u8_read(const volatile void *addr)
{
return *(const volatile rt_uint8_t *)addr;
}
static inline rt_uint16_t u16_read(const volatile void *addr)
{
return *(const volatile rt_uint16_t *)addr;
}
static inline rt_uint32_t u32_read(const volatile void *addr)
{
return *(const volatile rt_uint32_t *)addr;
}
static inline void u8_write(rt_uint8_t value, volatile void *addr)
{
*(volatile rt_uint8_t *)addr = value;
}
static inline void u16_write(rt_uint16_t value, volatile void *addr)
{
*(volatile rt_uint16_t *)addr = value;
}
static inline void u32_write(rt_uint32_t value, volatile void *addr)
{
*(volatile rt_uint32_t *)addr = value;
}
#define readb(c) u8_read(c)
#define readw(c) u16_read(c)
#define readl(c) u32_read(c)
#define readsb(p, d, l) ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p, d, l) ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p, d, l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v, c) u8_write(v, c)
#define writew(v, c) u16_write(v, c)
#define writel(v, c) u32_write(v, c)
#define writesb(p, d, l) ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p, d, l) ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p, d, l) ({ __iowmb(); __raw_writesl(p,d,l); })
static inline void rt_sdhci_writel(struct rt_sdhci_host *host, rt_uint32_t val, int reg)
{
writel(val, host->ioaddr + reg);
}
static inline void rt_sdhci_writew(struct rt_sdhci_host *host, rt_uint16_t val, int reg)
{
writew(val, host->ioaddr + reg);
}
static inline void rt_sdhci_writeb(struct rt_sdhci_host *host, rt_uint8_t val, int reg)
{
writeb(val, host->ioaddr + reg);
}
static inline rt_uint32_t rt_sdhci_readl(struct rt_sdhci_host *host, int reg)
{
return readl(host->ioaddr + reg);
}
static inline rt_uint16_t rt_sdhci_readw(struct rt_sdhci_host *host, int reg)
{
return readw(host->ioaddr + reg);
}
static inline rt_uint8_t rt_sdhci_readb(struct rt_sdhci_host *host, int reg)
{
return readb(host->ioaddr + reg);
}
struct rt_sdhci_ops
{
void (*set_clock)(struct rt_sdhci_host *host, unsigned int clock);
void (*set_power)(struct rt_sdhci_host *host, unsigned char mode,
unsigned short vdd);
rt_uint32_t (*irq)(struct rt_sdhci_host *host, rt_uint32_t intmask);
int (*set_dma_mask)(struct rt_sdhci_host *host);
int (*enable_dma)(struct rt_sdhci_host *host);
unsigned int (*get_max_clock)(struct rt_sdhci_host *host);
unsigned int (*get_min_clock)(struct rt_sdhci_host *host);
unsigned int (*get_timeout_clock)(struct rt_sdhci_host *host);
unsigned int (*get_max_timeout_count)(struct rt_sdhci_host *host);
void (*set_timeout)(struct rt_sdhci_host *host,
struct rt_mmcsd_cmd *cmd);
void (*set_bus_width)(struct rt_sdhci_host *host, int width);
unsigned int (*get_ro)(struct rt_sdhci_host *host);
void (*reset)(struct rt_sdhci_host *host, rt_uint8_t mask);
int (*platform_execute_tuning)(struct rt_sdhci_host *host, rt_uint32_t opcode);
void (*set_uhs_signaling)(struct rt_sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct rt_sdhci_host *host);
void (*card_event)(struct rt_sdhci_host *host);
void (*voltage_switch)(struct rt_sdhci_host *host);
void (*request_done)(struct rt_sdhci_host *host,
struct rt_mmcsd_req *mrq);
};
struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, size_t priv_size);
void rt_sdhci_free_host(struct rt_sdhci_host *host);
static inline void *sdhci_priv(struct rt_sdhci_host *host)
{
return host->private;
}
void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver,
const rt_uint32_t *caps, const rt_uint32_t *caps1);
int rt_sdhci_setup_host(struct rt_sdhci_host *host);
void rt_sdhci_cleanup_host(struct rt_sdhci_host *host);
int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host);
int rt_sdhci_init_host(struct rt_sdhci_host *host);
void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead);
rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock);
void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk);
void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode,unsigned short vdd);
void rt_read_reg(struct rt_sdhci_host* host);
void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode,
unsigned short vdd);
void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq);
int rt_sdhci_start_request_atomic(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq);
void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width);
void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask);
void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing);
int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode);
int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios);
int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc,
struct rt_mmcsd_io_cfg *ios);
void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable);
void rt_sdhci_start_tuning(struct rt_sdhci_host *host);
void rt_sdhci_end_tuning(struct rt_sdhci_host *host);
void rt_sdhci_reset_tuning(struct rt_sdhci_host *host);
void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable);
void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd);
void rt_read_reg_debug(struct rt_sdhci_host* host);
#endif /* __RT_SDHCI_HW_H */

View File

@@ -0,0 +1,345 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_MMC_H__
#define __RT_SDHCI_MMC_H__
#include <drivers/dev_mmcsd_core.h>
#include <rtthread.h>
#include <drivers/mmcsd_cmd.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/mmcsd_host.h>
#define mmc_dev(x) ((x)->parent)
#define MMC_SEND_TUNING_BLOCK_HS200 SEND_TUNING_BLOCK_HS200
#define MMC_SEND_TUNING_BLOCK SEND_TUNING_BLOCK
#define MMC_STOP_TRANSMISSION STOP_TRANSMISSION
#define MMC_BUS_TEST_R 14 /* adtc R1 */
#define MMC_WRITE_MULTIPLE_BLOCK WRITE_MULTIPLE_BLOCK
#define MMC_READ_MULTIPLE_BLOCK READ_MULTIPLE_BLOCK
#define MMC_TIMING_UHS_DDR50 MMCSD_TIMING_UHS_DDR50
#define MMC_TIMING_UHS_SDR50 MMCSD_TIMING_UHS_SDR50
#define MMC_TIMING_MMC_HS200 MMCSD_TIMING_MMC_HS200
#define MMC_TIMING_MMC_HS400 MMCSD_TIMING_MMC_HS400
#define MMC_TIMING_UHS_SDR104 MMCSD_TIMING_UHS_SDR104
#define MMC_TIMING_UHS_SDR25 MMCSD_TIMING_UHS_SDR25
#define MMC_TIMING_MMC_DDR52 MMCSD_TIMING_MMC_DDR52
#define MMC_TIMING_UHS_SDR12 MMCSD_TIMING_UHS_SDR12
#define MMC_TIMING_SD_HS MMCSD_TIMING_SD_HS
#define MMC_TIMING_MMC_HS MMCSD_TIMING_MMC_HS
#define MMC_POWER_OFF MMCSD_POWER_OFF
#define MMC_POWER_UP MMCSD_POWER_UP
#define MMC_POWER_ON MMCSD_POWER_ON
#define MMC_POWER_UNDEFINED 3
#define MMC_SET_DRIVER_TYPE_B 0
#define MMC_SET_DRIVER_TYPE_A 1
#define MMC_SET_DRIVER_TYPE_C 2
#define MMC_SET_DRIVER_TYPE_D 3
#define MMC_SIGNAL_VOLTAGE_330 0
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2
#define MMC_RSP_PRESENT (1 << 16)
#define MMC_RSP_136 (1 << 17) /* 136 bit response */
#define MMC_RSP_CRC (1 << 18) /* expect valid crc */
#define MMC_RSP_BUSY (1 << 19) /* card may send busy */
#define MMC_RSP_OPCODE (1 << 20) /* response contains opcode */
#define MMC_RSP_NONE (0)
#define MMC_RSP_R1 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
#define MMC_RSP_R4 (MMC_RSP_PRESENT)
#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC CMD_ADTC
#define MMC_BUS_WIDTH_8 MMCSD_BUS_WIDTH_8
#define MMC_BUS_WIDTH_4 MMCSD_BUS_WIDTH_4
#define MMC_BUS_WIDTH_1 MMCSD_BUS_WIDTH_1
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
enum mmc_blk_status
{
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
MMC_BLK_NEW_REQUEST,
};
#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
struct rt_mmc_host;
struct mmc_host_ops
{
void (*request)(struct rt_mmc_host *host, struct rt_mmcsd_req *req);
void (*set_ios)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*get_ro)(struct rt_mmc_host *host);
int (*get_cd)(struct rt_mmc_host *host);
void (*enable_sdio_irq)(struct rt_mmc_host *host, int enable);
void (*ack_sdio_irq)(struct rt_mmc_host *host);
int (*start_signal_voltage_switch)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*card_busy)(struct rt_mmc_host *host);
int (*execute_tuning)(struct rt_mmc_host *host, unsigned opcode);
int (*prepare_hs400_tuning)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*hs400_prepare_ddr)(struct rt_mmc_host *host);
void (*hs400_downgrade)(struct rt_mmc_host *host);
void (*hs400_complete)(struct rt_mmc_host *host);
void (*hs400_enhanced_strobe)(struct rt_mmc_host *host,
struct rt_mmcsd_io_cfg* ios);
void (*hw_reset)(struct rt_mmc_host* host);
void (*card_event)(struct rt_mmc_host* host);
};
struct regulator;
struct mmc_pwrseq;
struct mmc_supply
{
struct regulator *vmmc; /* Card power supply */
struct regulator *vqmmc; /* Optional Vccq supply */
};
struct mmc_ctx
{
struct task_struct *task;
};
/* VDD voltage 3.3 ~ 3.4 */
#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */
#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */
#define MMC_CAP2_HS200_1_8V_SDR MMCSD_SUP_HS200_1V8
#define MMC_CAP_4_BIT_DATA MMCSD_BUSWIDTH_4
#define MMC_CAP_8_BIT_DATA MMCSD_BUSWIDTH_8
#define MMC_CAP2_HS200 MMCSD_SUP_HS200
#define MMC_CAP_MMC_HIGHSPEED MMCSD_SUP_HIGHSPEED
#define MMC_CAP_SD_HIGHSPEED MMCSD_SUP_HIGHSPEED
#define MMC_CAP_1_8V_DDR MMCSD_SUP_DDR_1V8
#define MMC_CAP_3_3V_DDR MMCSD_SUP_DDR_3V3
#define MMC_CAP_1_2V_DDR MMCSD_SUP_DDR_1V2
#define MMC_CAP_NONREMOVABLE MMCSD_SUP_NONREMOVABLE
#define MMC_CAP_UHS_DDR50 0
#define MMC_CAP2_HS400 0
#define MMC_CAP_UHS_SDR50 0
#define MMC_CAP_UHS_SDR25 0
#define MMC_CAP_UHS_SDR12 0
#define MMC_CAP_UHS_SDR104 0
#define MMC_CAP_UHS 0
#define MMC_CAP2_HSX00_1_8V 0
#define MMC_CAP2_HS400_ES 0
#define MMC_CAP_NEEDS_POLL 0
#define MMC_CAP2_HSX00_1_2V 0
#define MMC_CAP2_HS400_1_8V 0
#define MMC_CAP_DRIVER_TYPE_D 0
#define MMC_CAP_DRIVER_TYPE_C 0
#define MMC_SET_DRIVER_TYPE_B 0
#define MMC_CAP_DRIVER_TYPE_A 0
#define MMC_CAP2_SDIO_IRQ_NOTHREAD 0
#define MMC_CAP_CMD23 0
#define MMC_CAP_SDIO_IRQ 0
#define MMC_CAP2_NO_SDIO (1 << 19)
#define MMC_CAP2_NO_SD (1 << 21)
#define MMC_CAP2_NO_MMC (1 << 22)
#define MMC_CAP2_CQE (1 << 23)
#define MMC_VDD_165_195 VDD_165_195
#define MMC_VDD_20_21 VDD_20_21
#define MMC_VDD_29_30 VDD_29_30
#define MMC_VDD_30_31 VDD_30_31
#define MMC_VDD_32_33 VDD_32_33
#define MMC_VDD_33_34 VDD_33_34
struct rt_mmc_host
{
struct rt_mmcsd_host rthost;
struct rt_device *parent;
int index;
const struct mmc_host_ops *ops;
unsigned int f_min;
unsigned int f_max;
unsigned int f_init;
rt_uint32_t ocr_avail;
rt_uint32_t ocr_avail_sdio; /* SDIO-specific OCR */
rt_uint32_t ocr_avail_sd; /* SD-specific OCR */
rt_uint32_t ocr_avail_mmc; /* MMC-specific OCR */
struct wakeup_source *ws; /* Enable consume of uevents */
rt_uint32_t max_current_330;
rt_uint32_t max_current_300;
rt_uint32_t max_current_180;
rt_uint32_t caps; /* Host capabilities */
rt_uint32_t caps2; /* More host capabilities */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
unsigned int max_blk_count; /* maximum number of blocks in one req */
unsigned int max_busy_timeout; /* max busy timeout in ms */
struct rt_mmcsd_io_cfg ios; /* current io bus settings */
unsigned int retune_period;
/* group bitfields together to minimize padding */
unsigned int use_spi_crc : 1;
unsigned int claimed : 1; /* host exclusively claimed */
unsigned int doing_init_tune : 1; /* initial tuning in progress */
unsigned int can_retune : 1; /* re-tuning can be used */
unsigned int doing_retune : 1; /* re-tuning in progress */
unsigned int retune_now : 1; /* do re-tuning at next req */
unsigned int retune_paused : 1; /* re-tuning is temporarily disabled */
unsigned int retune_crc_disable : 1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge : 1; /* merging can be used */
unsigned int vqmmc_enabled : 1; /* vqmmc regulator is enabled */
int need_retune; /* re-tuning is needed */
int hold_retune; /* hold off re-tuning */
rt_bool_t trigger_card_event; /* card_event necessary */
unsigned int sdio_irqs;
rt_bool_t sdio_irq_pending;
struct led_trigger *led; /* activity led */
struct mmc_supply supply;
/* Ongoing data transfer that allows commands during transfer */
struct rt_mmcsd_req *ongoing_mrq;
unsigned int actual_clock; /* Actual HC clock rate */
rt_uint32_t pm_caps;
unsigned long private[];
};
static inline int mmc_card_is_removable(struct rt_mmc_host *host)
{
return !(host->caps & MMC_CAP_NONREMOVABLE);
}
struct device_node;
struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *);
int rt_mmc_add_host(struct rt_mmc_host *);
void rt_mmc_remove_host(struct rt_mmc_host *);
void rt_mmc_free_host(struct rt_mmc_host *);
int rt_mmc_of_parse(struct rt_mmc_host *host);
int rt_mmc_of_parse_voltage(struct rt_mmc_host *host, rt_uint32_t *mask);
static inline void *mmc_priv(struct rt_mmc_host *host)
{
return (void *)host->private;
}
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (x->parent->parent.name)
void rt_mmc_detect_change(struct rt_mmc_host *, unsigned long delay);
void rt_mmc_request_done(struct rt_mmc_host *, struct rt_mmcsd_req *);
void mmc_command_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq);
void mmc_cqe_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq);
static inline rt_bool_t sdio_irq_claimed(struct rt_mmc_host *host)
{
return host->sdio_irqs > 0;
}
static inline int mmc_regulator_set_ocr(struct rt_mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
{
return 0;
}
int mmc_regulator_get_supply(struct rt_mmc_host *mmc);
int mmc_regulator_enable_vqmmc(struct rt_mmc_host *mmc);
void mmc_regulator_disable_vqmmc(struct rt_mmc_host *mmc);
void mmc_retune_timer_stop(struct rt_mmc_host* host);
enum dma_data_direction
{
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
static inline void mmc_retune_needed(struct rt_mmc_host *host)
{
if (host->can_retune)
host->need_retune = 1;
}
static inline rt_bool_t mmc_can_retune(struct rt_mmc_host *host)
{
return host->can_retune == 1;
}
static inline rt_bool_t mmc_doing_retune(struct rt_mmc_host *host)
{
return host->doing_retune == 1;
}
static inline rt_bool_t mmc_doing_tune(struct rt_mmc_host *host)
{
return host->doing_retune == 1 || host->doing_init_tune == 1;
}
static inline int mmc_get_dma_dir(struct rt_mmcsd_data *data)
{
return data->flags & DATA_DIR_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
static inline rt_bool_t mmc_op_multi(rt_uint32_t opcode)
{
return opcode == MMC_WRITE_MULTIPLE_BLOCK || opcode == MMC_READ_MULTIPLE_BLOCK;
}
static inline rt_bool_t mmc_op_tuning(rt_uint32_t opcode)
{
return opcode == MMC_SEND_TUNING_BLOCK || opcode == MMC_SEND_TUNING_BLOCK_HS200;
}
int rt_mmc_gpio_get_cd(struct rt_mmc_host *host);
void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay);
int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios);
rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host);
int rt_mmc_gpio_get_ro(struct rt_mmc_host *host);
int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode);
int rt_mmc_of_parse(struct rt_mmc_host *host);
#endif

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_MISC_H__
#define __RT_SDHCI_MISC_H__
#define __BF_FIELD_CHECK(...)
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
#define FIELD_GET(_mask, _reg) \
({ \
__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
#define FIELD_PREP(_mask, _val) \
({ \
__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
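/*
 * Example: FIELD_GET(0x0300, reg) extracts bits 9:8 of reg, and
 * FIELD_PREP(0x0300, 2) places the value 2 back into that field.
 */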
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define min_t(type, x, y) (((type)x < (type)y) ? x : y)
#define max_t(type, x, y) (((type)x > (type)y) ? x : y)
#define min(x, y) ((x) < (y) ? (x) : (y))
#define from_timer(var, callback_timer, timer_fieldname) \
container_of(callback_timer, typeof(*var), timer_fieldname)
#define le32_to_cpu(x) (x)
#define le16_to_cpu(x) (x)
#define cpu_to_le16(x) (x)
#define cpu_to_le32(x) (x)
#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define do_div(n, base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
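/* Usage: rt_uint64_t n = ...; rt_uint32_t rem = do_div(n, 1000); n now holds the quotient. */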
#define fallthrough \
do { \
} while (0)
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV);
rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host);
struct regulator
{
const char *supply_name;
};
int regulator_get_current_limit(struct regulator *regulator);
#endif

View File

@@ -0,0 +1,125 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#include "sdhci-platform.h"
static const struct rt_sdhci_ops sdhci_pltfm_ops = {
.set_clock = rt_sdhci_set_clock,
.set_bus_width = rt_sdhci_set_bus_width,
.reset = rt_sdhci_reset,
.set_uhs_signaling = rt_sdhci_set_uhs,
};
void rt_sdhci_get_property(struct rt_platform_device *pdev)
{
struct rt_device *dev = &pdev->parent;
struct rt_sdhci_host *host = pdev->priv;
struct rt_sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
rt_uint32_t bus_width;
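    /* Map generic SDHCI devicetree properties onto the corresponding host quirk bits */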
if (rt_dm_dev_prop_read_bool(dev, "sdhci,auto-cmd12"))
host->quirks |= RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
if (rt_dm_dev_prop_read_bool(dev, "sdhci,1-bit-only") || (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) == 0 && bus_width == 1))
host->quirks |= RT_SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (rt_dm_dev_prop_read_bool(dev, "broken-cd"))
host->quirks |= RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (rt_dm_dev_prop_read_bool(dev, "no-1-8-v"))
host->quirks2 |= RT_SDHCI_QUIRK2_NO_1_8_V;
rt_dm_dev_prop_read_u32(dev, "clock-frequency", &pltfm_host->clock);
if (rt_dm_dev_prop_read_bool(dev, "keep-power-in-suspend"))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
if (rt_dm_dev_prop_read_bool(dev, "wakeup-source") || rt_dm_dev_prop_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}
struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct rt_sdhci_host *host;
struct rt_device *dev = &pdev->parent;
void *ioaddr;
int irq;
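    /* Map the controller's register window and fetch its IRQ before allocating the host */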
ioaddr = rt_dm_dev_iomap(dev, 0);
if (!ioaddr)
{
return RT_NULL;
}
irq = rt_dm_dev_get_irq(dev, 0);
if (irq < 0)
{
return RT_NULL;
}
host = rt_sdhci_alloc_host(dev,sizeof(struct rt_sdhci_pltfm_host) + priv_size);
if (!host)
{
return RT_NULL;
}
host->irq = irq;
host->ioaddr = ioaddr;
host->hw_name = rt_dm_dev_get_name(dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
else
host->ops = &sdhci_pltfm_ops;
if (pdata)
{
host->quirks = pdata->quirks;
host->quirks2 = pdata->quirks2;
}
pdev->priv = host;
return host;
}
int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct rt_sdhci_host *host;
int ret = 0;
host = rt_sdhci_pltfm_init(pdev, pdata, priv_size);
if (!host)
return -RT_ERROR;
rt_sdhci_get_property(pdev);
ret = rt_sdhci_init_host(host);
if (ret)
rt_sdhci_pltfm_free(pdev);
return ret;
}
void rt_sdhci_pltfm_free(struct rt_platform_device *pdev)
{
struct rt_sdhci_host *host = pdev->priv;
rt_sdhci_free_host(host);
}
void rt_sdhci_pltfm_remove(struct rt_platform_device *pdev)
{
struct rt_sdhci_host *host = pdev->priv;
int dead = (readl(host->ioaddr + RT_SDHCI_INT_STATUS) == 0xffffffff);
rt_sdhci_uninit_host(host, dead);
rt_sdhci_pltfm_free(pdev);
}

File diff suppressed because it is too large Load Diff