[DEVICE/SDIO] port to the block

1. Remove GPT parsing (dev_gpt.c / dev_gpt.h).
2. Remove the custom block device implementation.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
GuEe-GUI 2024-10-30 11:41:07 +08:00 committed by Rbb666
parent c424cb8186
commit 33785ca68a
8 changed files with 144 additions and 1370 deletions
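
Below is a minimal sketch of the registration pattern this port moves to, built only from interfaces that appear in this diff (struct rt_blk_disk, struct rt_blk_disk_ops with .read/.write/.getgeome, rt_container_of, rt_hw_blk_disk_register); the RAM-backed "my_disk" driver and all of its names are hypothetical, for illustration only.

#include <rtthread.h>
#include <drivers/blk.h>

/* Hypothetical backing store: a tiny RAM disk with 512-byte sectors. */
#define MY_SECTOR_SIZE  512
#define MY_SECTOR_COUNT 64

struct my_disk
{
    struct rt_blk_disk parent;  /* embed the generic block disk */
    rt_uint8_t storage[MY_SECTOR_COUNT * MY_SECTOR_SIZE];
};

#define raw_to_my_disk(raw) rt_container_of(raw, struct my_disk, parent)

static rt_ssize_t my_disk_read(struct rt_blk_disk *disk, rt_off_t sector,
        void *buffer, rt_size_t sector_count)
{
    struct my_disk *md = raw_to_my_disk(disk);

    RT_ASSERT(sector + sector_count <= MY_SECTOR_COUNT);
    rt_memcpy(buffer, &md->storage[sector * MY_SECTOR_SIZE],
              sector_count * MY_SECTOR_SIZE);

    return sector_count;
}

static rt_ssize_t my_disk_write(struct rt_blk_disk *disk, rt_off_t sector,
        const void *buffer, rt_size_t sector_count)
{
    struct my_disk *md = raw_to_my_disk(disk);

    RT_ASSERT(sector + sector_count <= MY_SECTOR_COUNT);
    rt_memcpy(&md->storage[sector * MY_SECTOR_SIZE], buffer,
              sector_count * MY_SECTOR_SIZE);

    return sector_count;
}

static rt_err_t my_disk_getgeome(struct rt_blk_disk *disk,
        struct rt_device_blk_geometry *geometry)
{
    geometry->bytes_per_sector = MY_SECTOR_SIZE;
    geometry->block_size = MY_SECTOR_SIZE;
    geometry->sector_count = MY_SECTOR_COUNT;

    return RT_EOK;
}

static const struct rt_blk_disk_ops my_disk_ops =
{
    .read = my_disk_read,
    .write = my_disk_write,
    .getgeome = my_disk_getgeome,
};

static struct my_disk disk0;

static int my_disk_init(void)
{
    disk0.parent.ops = &my_disk_ops;
    disk0.parent.parallel_io = RT_FALSE;
    disk0.parent.removable = RT_FALSE;
    disk0.parent.max_partitions = 4;  /* the blk layer scans the partition table */

    rt_strncpy(disk0.parent.parent.parent.name, "myblk", RT_NAME_MAX);

    return rt_hw_blk_disk_register(&disk0.parent);
}
INIT_DEVICE_EXPORT(my_disk_init);

Because the generic block layer owns partition scanning (see max_partitions above), the driver no longer parses GPT/MBR itself, which is presumably why dev_gpt.c and the per-partition device registration are deleted in this commit.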

View File

@@ -1,132 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
*/
#ifndef __DEV_GPT_H__
#define __DEV_GPT_H__
#include <rtthread.h>
#include <stdint.h>
typedef struct
{
uint8_t b[16]; /* GUID 16 bytes*/
} guid_t;
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
#define GPT_MBR_PROTECTIVE 1
#define GPT_MBR_HYBRID 2
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
typedef guid_t gpt_guid_t __attribute__ ((aligned (4)));
#define EFI_GUID(a, b, c, d...) (gpt_guid_t){ { \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, d } }
#define NULL_GUID \
EFI_GUID(0x00000000, 0x0000, 0x0000,\
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
#define PARTITION_SYSTEM_GUID \
EFI_GUID( 0xC12A7328, 0xF81F, 0x11d2, \
0xBA, 0x4B, 0x00, 0xA0, 0xC9, 0x3E, 0xC9, 0x3B)
#define LEGACY_MBR_PARTITION_GUID \
EFI_GUID( 0x024DEE41, 0x33E7, 0x11d3, \
0x9D, 0x69, 0x00, 0x08, 0xC7, 0x81, 0xF3, 0x9F)
#define PARTITION_MSFT_RESERVED_GUID \
EFI_GUID( 0xE3C9E316, 0x0B5C, 0x4DB8, \
0x81, 0x7D, 0xF9, 0x2D, 0xF0, 0x02, 0x15, 0xAE)
#define PARTITION_BASIC_DATA_GUID \
EFI_GUID( 0xEBD0A0A2, 0xB9E5, 0x4433, \
0x87, 0xC0, 0x68, 0xB6, 0xB7, 0x26, 0x99, 0xC7)
#define PARTITION_LINUX_RAID_GUID \
EFI_GUID( 0xa19d880f, 0x05fc, 0x4d3b, \
0xa0, 0x06, 0x74, 0x3f, 0x0f, 0x84, 0x91, 0x1e)
#define PARTITION_LINUX_SWAP_GUID \
EFI_GUID( 0x0657fd6d, 0xa4ab, 0x43c4, \
0x84, 0xe5, 0x09, 0x33, 0xc8, 0x4b, 0x4f, 0x4f)
#define PARTITION_LINUX_LVM_GUID \
EFI_GUID( 0xe6d6d379, 0xf507, 0x44c2, \
0xa2, 0x3c, 0x23, 0x8f, 0x2a, 0x3d, 0xf9, 0x28)
#pragma pack(push, 1)
typedef struct _gpt_header
{
uint64_t signature;
uint32_t revision;
uint32_t header_size;
uint32_t header_crc32;
uint32_t reserved1;
uint64_t start_lba; /*GPT head start sector*/
uint64_t alternate_lba; /*GPT head alternate sector*/
uint64_t first_usable_lba;
uint64_t last_usable_lba;
gpt_guid_t disk_guid;
uint64_t partition_entry_lba;
uint32_t num_partition_entries;
uint32_t sizeof_partition_entry;
uint32_t partition_entry_array_crc32;
/* The rest of the logical block is reserved by UEFI and must be zero.
* EFI standard handles this by:
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
} gpt_header;
typedef struct _gpt_entry_attributes
{
uint64_t required_to_function:1;
uint64_t reserved:47;
uint64_t type_guid_specific:16;
} gpt_entry_attributes;
typedef struct _gpt_entry
{
gpt_guid_t partition_type_guid;
gpt_guid_t unique_partition_guid;
uint64_t starting_lba;
uint64_t ending_lba;
gpt_entry_attributes attributes;
uint16_t partition_name[72/sizeof(uint16_t)];
} gpt_entry;
typedef struct _gpt_mbr_record
{
uint8_t boot_indicator; /* unused by EFI, set to 0x80 for bootable */
uint8_t start_head; /* unused by EFI, pt start in CHS */
uint8_t start_sector; /* unused by EFI, pt start in CHS */
uint8_t start_track;
uint8_t os_type; /* EFI and legacy non-EFI OS types */
uint8_t end_head; /* unused by EFI, pt end in CHS */
uint8_t end_sector; /* unused by EFI, pt end in CHS */
uint8_t end_track; /* unused by EFI, pt end in CHS */
uint32_t starting_lba; /* used by EFI - start addr of the on disk pt */
uint32_t size_in_lba; /* used by EFI - size of pt in LBA */
} gpt_mbr_record;
typedef struct _legacy_mbr
{
uint8_t boot_code[440];
uint32_t unique_mbr_signature;
uint16_t unknown;
gpt_mbr_record partition_record[4];
uint16_t signature;
} legacy_mbr;
#pragma pack(pop)
int check_gpt(struct rt_mmcsd_card *card);
int gpt_get_partition_param(struct rt_mmcsd_card *card, struct dfs_partition *part, uint32_t pindex);
void gpt_free(void);
#endif /*__DEV_GPT_H__*/

View File

@@ -249,8 +249,6 @@ struct rt_mmcsd_host *mmcsd_alloc_host(void);
void mmcsd_free_host(struct rt_mmcsd_host *host);
int rt_mmcsd_core_init(void);
int rt_mmcsd_blk_init(void);
rt_int32_t read_lba(struct rt_mmcsd_card *card, size_t lba, uint8_t *buffer, size_t count);
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card);
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card);

View File

@@ -218,7 +218,7 @@ struct rt_mmcsd_card {
struct rt_sdio_cccr cccr; /* common card info */
struct rt_sdio_cis cis; /* common tuple info */
struct rt_sdio_function *sdio_function[SDIO_MAX_FUNCTIONS + 1]; /* SDIO functions (devices) */
rt_list_t blk_devices; /* for block device list */
void *blk_dev;
struct rt_mmc_ext_csd ext_csd;
};

View File

@@ -134,6 +134,7 @@ struct rt_mmcsd_host
#define MMCSD_SUP_HS200_1V2 (1 << 10)
#define MMCSD_SUP_HS200 (MMCSD_SUP_HS200_1V2 | MMCSD_SUP_HS200_1V8) /* hs200 sdr */
#define MMCSD_SUP_NONREMOVABLE (1 << 11)
#define controller_is_removable(host) (!(host->flags & MMCSD_SUP_NONREMOVABLE))
#define MMCSD_SUP_HS400_1V8 (1 << 12)
#define MMCSD_SUP_HS400_1V2 (1 << 13)
#define MMCSD_SUP_HS400 (MMCSD_SUP_HS400_1V2 | MMCSD_SUP_HS400_1V8) /* hs400 ddr */

View File

@@ -1,5 +1,6 @@
config RT_USING_SDIO
bool "Using SD/MMC device drivers"
select RT_USING_BLK
default n
if RT_USING_SDIO

View File

@@ -7,7 +7,6 @@ dev_block.c
dev_mmcsd_core.c
dev_sd.c
dev_sdio.c
dev_gpt.c
dev_mmc.c
""")

View File

@@ -6,14 +6,13 @@
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2023-08-08 GuEe-GUI port to the block
*/
#include <rtthread.h>
#include <dfs_fs.h>
#include <dfs_file.h>
#include <drivers/blk.h>
#include <drivers/misc.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/dev_gpt.h>
#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
@@ -23,26 +22,24 @@
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);
#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))
#define RT_DEVICE_CTRL_BLK_SSIZEGET 0x1268 /**< get number of bytes per sector */
#define RT_DEVICE_CTRL_ALL_BLK_SSIZEGET 0x80081272 /**< get number of bytes per sector * sector counts*/
struct mmcsd_blk_device
{
struct rt_mmcsd_card *card;
rt_list_t list;
struct rt_device dev;
struct dfs_partition part;
struct rt_device_blk_geometry geometry;
rt_size_t max_req_size;
};
#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
#define RT_GPT_PARTITION_MAX 128
struct mmcsd_blk_device
{
struct rt_blk_disk parent;
struct rt_mmcsd_card *card;
rt_size_t max_req_size;
struct rt_device_blk_geometry geometry;
};
#define raw_to_mmcsd_blk(raw) rt_container_of(raw, struct mmcsd_blk_device, parent)
#ifdef RT_USING_DM
static struct rt_dm_ida sdio_ida = RT_DM_IDA_INIT(SDIO);
#endif
static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
{
@@ -252,131 +249,6 @@ static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
return RT_EOK;
}
static rt_err_t rt_mmcsd_init(rt_device_t dev)
{
return RT_EOK;
}
static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
{
return RT_EOK;
}
static rt_err_t rt_mmcsd_close(rt_device_t dev)
{
return RT_EOK;
}
static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
{
struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
break;
case RT_DEVICE_CTRL_BLK_PARTITION:
rt_memcpy(args, &blk_dev->part, sizeof(struct dfs_partition));
break;
case RT_DEVICE_CTRL_BLK_SSIZEGET:
rt_memcpy(args, &blk_dev->geometry.bytes_per_sector, sizeof(rt_uint32_t));
break;
case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
{
rt_uint64_t count_mul_per = blk_dev->geometry.bytes_per_sector * blk_dev->geometry.sector_count;
rt_memcpy(args, &count_mul_per, sizeof(rt_uint64_t));
}
break;
default:
break;
}
return RT_EOK;
}
static rt_ssize_t rt_mmcsd_read(rt_device_t dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
rt_err_t err = 0;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = size;
void *rd_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
struct dfs_partition *part = &blk_dev->part;
if (dev == RT_NULL)
{
rt_set_errno(-EINVAL);
return 0;
}
rt_sem_take(part->lock, RT_WAITING_FOREVER);
while (remain_size)
{
req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
if (err)
break;
offset += req_size;
rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
remain_size -= req_size;
}
rt_sem_release(part->lock);
/* the length of reading must align to SECTOR SIZE */
if (err)
{
rt_set_errno(-EIO);
return 0;
}
return size - remain_size;
}
static rt_ssize_t rt_mmcsd_write(rt_device_t dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
rt_err_t err = 0;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = size;
void *wr_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
struct dfs_partition *part = &blk_dev->part;
if (dev == RT_NULL)
{
rt_set_errno(-EINVAL);
return 0;
}
rt_sem_take(part->lock, RT_WAITING_FOREVER);
while (remain_size)
{
req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
if (err)
break;
offset += req_size;
wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
remain_size -= req_size;
}
rt_sem_release(part->lock);
/* the length of reading must align to SECTOR SIZE */
if (err)
{
rt_set_errno(-EIO);
return 0;
}
return size - remain_size;
}
static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
{
struct rt_mmcsd_cmd cmd;
@@ -402,551 +274,149 @@ static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
return 0;
}
rt_int32_t read_lba(struct rt_mmcsd_card *card, size_t lba, uint8_t *buffer, size_t count)
static rt_ssize_t mmcsd_blk_read(struct rt_blk_disk *disk, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
rt_uint8_t status = 0;
rt_err_t err;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = sector_count;
void *rd_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
status = mmcsd_set_blksize(card);
if (status)
while (remain_size)
{
return status;
}
rt_thread_mdelay(1);
status = rt_mmcsd_req_blk(card, lba, buffer, count, 0);
return status;
}
req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops mmcsd_blk_ops =
{
rt_mmcsd_init,
rt_mmcsd_open,
rt_mmcsd_close,
rt_mmcsd_read,
rt_mmcsd_write,
rt_mmcsd_control
};
#endif
err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, rd_ptr, req_size, 0);
#ifdef RT_USING_DFS_V2
static ssize_t rt_mmcsd_fops_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
int result = 0;
rt_device_t dev = (rt_device_t)file->vnode->data;
struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
int bytes_per_sector = blk_dev->geometry.bytes_per_sector;
int blk_pos = *pos / bytes_per_sector;
int first_offs = *pos % bytes_per_sector;
char *rbuf;
int rsize = 0;
rbuf = rt_malloc(bytes_per_sector);
if (!rbuf)
{
return 0;
}
/*
** #1: read first unalign block size.
*/
result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
if (result != 1)
{
rt_free(rbuf);
return 0;
}
if (count > bytes_per_sector - first_offs)
{
rsize = bytes_per_sector - first_offs;
}
else
{
rsize = count;
}
rt_memcpy(buf, rbuf + first_offs, rsize);
blk_pos++;
/*
** #2: read continuous block size.
*/
while (rsize < count)
{
result = rt_mmcsd_read(dev, blk_pos++, rbuf, 1);
if (result != 1)
{
break;
}
if (count - rsize >= bytes_per_sector)
{
rt_memcpy(buf + rsize, rbuf, bytes_per_sector);
rsize += bytes_per_sector;
}
else
{
rt_memcpy(buf + rsize, rbuf, count - rsize);
rsize = count;
}
}
rt_free(rbuf);
*pos += rsize;
return rsize;
}
static int rt_mmcsd_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
rt_device_t dev = (rt_device_t)file->vnode->data;
return rt_mmcsd_control(dev,cmd,arg);
}
static int rt_mmcsd_fops_open(struct dfs_file *file)
{
rt_device_t dev = (rt_device_t)file->vnode->data;
rt_mmcsd_control(dev, RT_DEVICE_CTRL_ALL_BLK_SSIZEGET, &file->vnode->size);
return RT_EOK;
}
static int rt_mmcsd_fops_close(struct dfs_file *file)
{
return RT_EOK;
}
static ssize_t rt_mmcsd_fops_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
int result = 0;
rt_device_t dev = (rt_device_t)file->vnode->data;
struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
int bytes_per_sector = blk_dev->geometry.bytes_per_sector;
int blk_pos = *pos / bytes_per_sector;
int first_offs = *pos % bytes_per_sector;
char *rbuf = 0;
int wsize = 0;
/*
** #1: write first unalign block size.
*/
if (first_offs != 0)
{
if (count > bytes_per_sector - first_offs)
{
wsize = bytes_per_sector - first_offs;
}
else
{
wsize = count;
}
rbuf = rt_malloc(bytes_per_sector);
if (!rbuf)
{
return 0;
}
result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
if (result != 1)
{
rt_free(rbuf);
return 0;
}
rt_memcpy(rbuf + first_offs, buf, wsize);
result = rt_mmcsd_write(dev, blk_pos, rbuf, 1);
if (result != 1)
{
rt_free(rbuf);
return 0;
}
rt_free(rbuf);
blk_pos += 1;
}
/*
** #2: write continuous block size.
*/
if ((count - wsize) / bytes_per_sector != 0)
{
result = rt_mmcsd_write(dev, blk_pos, buf + wsize, (count - wsize) / bytes_per_sector);
wsize += result * bytes_per_sector;
blk_pos += result;
if (result != (count - wsize) / bytes_per_sector)
{
*pos += wsize;
return wsize;
}
}
/*
** # 3: write last unalign block size.
*/
if ((count - wsize) != 0)
{
rbuf = rt_malloc(bytes_per_sector);
if (rbuf != RT_NULL)
{
result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
if (result == 1)
{
rt_memcpy(rbuf, buf + wsize, count - wsize);
result = rt_mmcsd_write(dev, blk_pos, rbuf, 1);
if (result == 1)
{
wsize += count - wsize;
}
}
rt_free(rbuf);
}
}
*pos += wsize;
return wsize;
}
static int rt_mmcsd_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
int mask = 0;
return mask;
}
static int rt_mmcsd_fops_flush(struct dfs_file *file)
{
return RT_EOK;
}
const static struct dfs_file_ops mmcsd_blk_fops =
{
rt_mmcsd_fops_open,
rt_mmcsd_fops_close,
rt_mmcsd_fops_ioctl,
rt_mmcsd_fops_read,
rt_mmcsd_fops_write,
rt_mmcsd_fops_flush,
generic_dfs_lseek,
RT_NULL,
RT_NULL,
rt_mmcsd_fops_poll
};
#endif
rt_int32_t gpt_device_probe(struct rt_mmcsd_card *card)
{
rt_int32_t err = RT_EOK;
rt_uint8_t i, status;
char dname[10];
char sname[16];
struct mmcsd_blk_device *blk_dev = RT_NULL;
blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
if (!blk_dev)
{
LOG_E("mmcsd:malloc memory failed!");
return -1;
}
blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
card->host->max_seg_size) >> 9,
(card->host->max_blk_count *
card->host->max_blk_size) >> 9);
blk_dev->part.offset = 0;
blk_dev->part.size = 0;
rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
/* register mmcsd device */
blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
blk_dev->dev.ops = &mmcsd_blk_ops;
#else
blk_dev->dev.init = rt_mmcsd_init;
blk_dev->dev.open = rt_mmcsd_open;
blk_dev->dev.close = rt_mmcsd_close;
blk_dev->dev.read = rt_mmcsd_read;
blk_dev->dev.write = rt_mmcsd_write;
blk_dev->dev.control = rt_mmcsd_control;
#endif
blk_dev->card = card;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count =
card->card_capacity * (1024 / 512);
blk_dev->dev.user_data = blk_dev;
rt_device_register(&(blk_dev->dev), card->host->name,
RT_DEVICE_FLAG_RDWR);
#ifdef RT_USING_POSIX_DEVIO
#ifdef RT_USING_DFS_V2
blk_dev->dev.fops = &mmcsd_blk_fops;
#endif
#endif
rt_list_insert_after(&blk_devices, &blk_dev->list);
for (i = 0; i < RT_GPT_PARTITION_MAX; i++)
{
blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
if (!blk_dev)
{
LOG_E("mmcsd:malloc memory failed!");
break;
}
blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
card->host->max_seg_size) >> 9,
(card->host->max_blk_count *
card->host->max_blk_size) >> 9);
/* get the first partition */
status = gpt_get_partition_param(card, &blk_dev->part, i);
if (status == RT_EOK)
{
rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
/* register mmcsd device */
blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
blk_dev->dev.ops = &mmcsd_blk_ops;
#else
blk_dev->dev.init = rt_mmcsd_init;
blk_dev->dev.open = rt_mmcsd_open;
blk_dev->dev.close = rt_mmcsd_close;
blk_dev->dev.read = rt_mmcsd_read;
blk_dev->dev.write = rt_mmcsd_write;
blk_dev->dev.control = rt_mmcsd_control;
#endif
blk_dev->card = card;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count = blk_dev->part.size;
blk_dev->dev.user_data = blk_dev;
rt_device_register(&(blk_dev->dev), dname,
RT_DEVICE_FLAG_RDWR);
#ifdef RT_USING_POSIX_DEVIO
#ifdef RT_USING_DFS_V2
blk_dev->dev.fops = &mmcsd_blk_fops;
#endif
#endif
rt_list_insert_after(&blk_devices, &blk_dev->list);
}
else
{
rt_free(blk_dev);
blk_dev = RT_NULL;
break;
}
#ifdef RT_USING_DFS_MNTTABLE
if (blk_dev)
{
LOG_I("try to mount file system!");
/* try to mount file system on this block device */
dfs_mount_device(&(blk_dev->dev));
}
#endif
}
gpt_free();
return err;
}
rt_int32_t mbr_device_probe(struct rt_mmcsd_card *card)
{
rt_int32_t err = 0;
rt_uint8_t i, status;
rt_uint8_t *sector;
char dname[10];
char sname[16];
struct mmcsd_blk_device *blk_dev = RT_NULL;
err = mmcsd_set_blksize(card);
if (err)
{
return err;
}
rt_thread_mdelay(1);
/* get the first sector to read partition table */
sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
if (sector == RT_NULL)
{
LOG_E("allocate partition sector buffer failed!");
return -RT_ENOMEM;
offset += req_size;
rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
remain_size -= req_size;
}
status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
if (status == RT_EOK)
{
blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
if (!blk_dev)
{
LOG_E("mmcsd:malloc memory failed!");
return -1;
}
blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
card->host->max_seg_size) >> 9,
(card->host->max_blk_count *
card->host->max_blk_size) >> 9);
blk_dev->part.offset = 0;
blk_dev->part.size = 0;
rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
/* register mmcsd device */
blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
blk_dev->dev.ops = &mmcsd_blk_ops;
#else
blk_dev->dev.init = rt_mmcsd_init;
blk_dev->dev.open = rt_mmcsd_open;
blk_dev->dev.close = rt_mmcsd_close;
blk_dev->dev.read = rt_mmcsd_read;
blk_dev->dev.write = rt_mmcsd_write;
blk_dev->dev.control = rt_mmcsd_control;
#endif
blk_dev->card = card;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count =
card->card_capacity * (1024 / 512);
blk_dev->dev.user_data = blk_dev;
rt_device_register(&(blk_dev->dev), card->host->name,
RT_DEVICE_FLAG_RDWR);
rt_list_insert_after(&blk_devices, &blk_dev->list);
for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
{
blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
if (!blk_dev)
{
LOG_E("mmcsd:malloc memory failed!");
break;
}
blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
card->host->max_seg_size) >> 9,
(card->host->max_blk_count *
card->host->max_blk_size) >> 9);
/* get the first partition */
status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
if (status == RT_EOK)
{
rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
/* register mmcsd device */
blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
blk_dev->dev.ops = &mmcsd_blk_ops;
#else
blk_dev->dev.init = rt_mmcsd_init;
blk_dev->dev.open = rt_mmcsd_open;
blk_dev->dev.close = rt_mmcsd_close;
blk_dev->dev.read = rt_mmcsd_read;
blk_dev->dev.write = rt_mmcsd_write;
blk_dev->dev.control = rt_mmcsd_control;
#endif
blk_dev->card = card;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count = blk_dev->part.size;
blk_dev->dev.user_data = blk_dev;
rt_device_register(&(blk_dev->dev), dname,
RT_DEVICE_FLAG_RDWR);
rt_list_insert_after(&blk_devices, &blk_dev->list);
}
else
{
rt_free(blk_dev);
blk_dev = RT_NULL;
break;
}
#ifdef RT_USING_DFS_MNTTABLE
if (blk_dev)
{
LOG_I("try to mount file system!");
/* try to mount file system on this block device */
dfs_mount_device(&(blk_dev->dev));
}
#endif
}
}
else
{
LOG_E("read mmcsd first sector failed");
err = -RT_ERROR;
}
/* release sector buffer */
rt_free(sector);
return err;
return sector_count - remain_size;
}
static rt_ssize_t mmcsd_blk_write(struct rt_blk_disk *disk, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
rt_err_t err;
rt_size_t offset = 0;
rt_size_t req_size = 0;
rt_size_t remain_size = sector_count;
void *wr_ptr = (void *)buffer;
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
while (remain_size)
{
req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, wr_ptr, req_size, 1);
if (err)
{
return err;
}
offset += req_size;
wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
remain_size -= req_size;
}
return sector_count - remain_size;
}
static rt_err_t mmcsd_blk_getgeome(struct rt_blk_disk *disk,
struct rt_device_blk_geometry *geometry)
{
struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
rt_memcpy(geometry, &blk_dev->geometry, sizeof(*geometry));
return RT_EOK;
}
static const struct rt_blk_disk_ops mmcsd_blk_ops =
{
.read = mmcsd_blk_read,
.write = mmcsd_blk_write,
.getgeome = mmcsd_blk_getgeome,
};
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
{
uint32_t err = 0;
rt_err_t err;
struct rt_mmcsd_host *host = card->host;
struct mmcsd_blk_device *blk_dev = rt_calloc(1, sizeof(*blk_dev));
LOG_D("probe mmcsd block device!");
if (check_gpt(card) != 0)
if (!blk_dev)
{
err = gpt_device_probe(card);
return -RT_ENOMEM;
}
else
card->blk_dev = blk_dev;
#ifdef RT_USING_DM
blk_dev->parent.ida = &sdio_ida;
#endif
blk_dev->parent.parallel_io = RT_FALSE;
blk_dev->parent.removable = controller_is_removable(host);
blk_dev->parent.ops = &mmcsd_blk_ops;
blk_dev->parent.max_partitions = RT_MMCSD_MAX_PARTITION;
blk_dev->card = card;
blk_dev->max_req_size = rt_min_t(rt_size_t,
host->max_dma_segs * host->max_seg_size,
host->max_blk_count * host->max_blk_size) >> 9;
blk_dev->geometry.bytes_per_sector = 1 << 9;
blk_dev->geometry.block_size = card->card_blksize;
blk_dev->geometry.sector_count = card->card_capacity * (1024 / 512);
/* Set blk size before partitions probe, Why? */
if ((err = mmcsd_set_blksize(card)))
{
err = mbr_device_probe(card);
goto _fail;
}
rt_thread_mdelay(1);
#ifdef RT_USING_DM
rt_dm_dev_set_name(&blk_dev->parent.parent, host->name);
#else
rt_strncpy(blk_dev->parent.parent.parent.name, host->name, RT_NAME_MAX);
#endif
if ((err = rt_hw_blk_disk_register(&blk_dev->parent)))
{
goto _fail;
}
return RT_EOK;
_fail:
card->blk_dev = RT_NULL;
rt_free(blk_dev);
return err;
}
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
rt_list_t *l, *n;
struct mmcsd_blk_device *blk_dev;
struct mmcsd_blk_device *blk_dev = card->blk_dev;
for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n, n = n->next)
if (!blk_dev)
{
blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
if (blk_dev->card == card)
{
/* unmount file system */
const char *mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
if (mounted_path)
{
dfs_unmount(mounted_path);
LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
return;
}
rt_sem_delete(blk_dev->part.lock);
rt_device_unregister(&blk_dev->dev);
rt_list_remove(&blk_dev->list);
if (!rt_hw_blk_disk_unregister(&blk_dev->parent))
{
card->blk_dev = RT_NULL;
rt_free(blk_dev);
}
}
}
/*
* This function will initialize block device on the mmc/sd.
*
* @deprecated since 2.1.0, this function does not need to be invoked
* in the system initialization.
*/
int rt_mmcsd_blk_init(void)
{
/* nothing */
return 0;
}

View File

@@ -1,563 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
*/
#include <rtthread.h>
#include <dfs_fs.h>
#include <drivers/dev_gpt.h>
#include <drivers/dev_mmcsd_core.h>
#define DBG_TAG "GPT"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>
#define min(a, b) a < b ? a : b
static int force_gpt = 0;
static gpt_header *_gpt;
static gpt_entry *_ptes;
#define GPT_TYPE 1
#define MBR_TYPE 0
static inline int efi_guidcmp (gpt_guid_t left, gpt_guid_t right)
{
return rt_memcmp(&left, &right, sizeof (gpt_guid_t));
}
static uint32_t last_lba(struct rt_mmcsd_card *card)
{
RT_ASSERT(card != RT_NULL);
return (card->card_sec_cnt) - 1;
}
static inline int pmbr_part_valid(gpt_mbr_record *part)
{
if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
{
goto invalid;
}
/* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
if ((uint32_t)(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
{
goto invalid;
}
return GPT_MBR_PROTECTIVE;
invalid:
return 0;
}
/*
*
* return ret
* ret = 0, invalid mbr
* ret = 1, protect mbr
* ret = 2, hybrid mbr
*/
int is_pmbr_valid(legacy_mbr *mbr, uint64_t total_sectors)
{
uint32_t sz = 0;
int i, part = 0, ret = 0; /* invalid by default */
if (!mbr || (uint16_t)(mbr->signature) != MSDOS_MBR_SIGNATURE)
{
goto done;
}
for (i = 0; i < 4; i++)
{
ret = pmbr_part_valid(&mbr->partition_record[i]);
if (ret == GPT_MBR_PROTECTIVE)
{
part = i;
/*
* Ok, we at least know that there's a protective MBR,
* now check if there are other partition types for
* hybrid MBR.
*/
goto check_hybrid;
}
}
if (ret != GPT_MBR_PROTECTIVE)
{
goto done;
}
check_hybrid:
for (i = 0; i < 4; i++)
{
if ((mbr->partition_record[i].os_type !=
EFI_PMBR_OSTYPE_EFI_GPT) &&
(mbr->partition_record[i].os_type != 0x00))
{
ret = GPT_MBR_HYBRID;
}
}
/*
* Protective MBRs take up the lesser of the whole disk
* or 2 TiB (32bit LBA), ignoring the rest of the disk.
* Some partitioning programs, nonetheless, choose to set
* the size to the maximum 32-bit limitation, disregarding
* the disk size.
*
* Hybrid MBRs do not necessarily comply with this.
*
* Consider a bad value here to be a warning to support dd'ing
* an image from a smaller disk to a larger disk.
*/
if (ret == GPT_MBR_PROTECTIVE)
{
sz = (uint32_t)(mbr->partition_record[part].size_in_lba);
if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
{
LOG_I("GPT: mbr size in lba (%u) different than whole disk (%u).",
sz, min(total_sectors - 1, 0xFFFFFFFF));
}
}
done:
return ret;
}
static gpt_entry *alloc_read_gpt_entries(struct rt_mmcsd_card *card, gpt_header *gpt)
{
size_t count;
gpt_entry *pte;
if (!gpt)
{
return RT_NULL;
}
count = (size_t)(gpt->num_partition_entries) * (gpt->sizeof_partition_entry);
if (!count)
{
return RT_NULL;
}
pte = rt_malloc(count);
if (!pte)
return RT_NULL;
if (read_lba(card, (size_t)(gpt->partition_entry_lba),(uint8_t *)pte, count/512) != RT_EOK)
{
rt_free(pte);
return RT_NULL;
}
return pte;
}
static gpt_header *alloc_read_gpt_header(struct rt_mmcsd_card *card, size_t lba)
{
gpt_header *gpt;
void *buf;
buf = rt_malloc(512);
if (!buf)
{
return RT_NULL;
}
if (read_lba(card, lba, (uint8_t *)buf, 1) != RT_EOK)
{
rt_free(buf);
return RT_NULL;
}
gpt = (gpt_header *)buf;
return gpt;
}
static int is_gpt_valid(struct rt_mmcsd_card *card, size_t lba, gpt_header **gpt, gpt_entry **ptes)
{
size_t lastlba;
if (!ptes || !gpt)
{
return 0;
}
*gpt = alloc_read_gpt_header(card, lba);
if (!(*gpt))
{
return 0;
}
/* Check the GUID Partition Table signature */
if ((uint64_t)((*gpt)->signature) != GPT_HEADER_SIGNATURE)
{
LOG_E("GUID Partition Table Header signature is wrong:"
"%ld != %ld",(uint64_t)((*gpt)->signature),(uint64_t)GPT_HEADER_SIGNATURE);
goto fail;
}
/* Check the GUID Partition Table header size is too small */
if ((uint32_t)((*gpt)->header_size) < sizeof(gpt_header))
{
LOG_E("GUID Partition Table Header size is too small: %u < %zu",
(uint32_t)((*gpt)->header_size),sizeof(gpt_header));
goto fail;
}
/* Check that the start_lba entry points to the LBA that contains
* the GUID Partition Table */
if ((uint64_t)((*gpt)->start_lba) != lba)
{
LOG_E("GPT start_lba incorrect: %ld != %ld",
(uint64_t)((*gpt)->start_lba),
(uint64_t)lba);
goto fail;
}
/* Check the first_usable_lba and last_usable_lba are
* within the disk.
*/
lastlba = last_lba(card);
if ((uint64_t)((*gpt)->first_usable_lba) > lastlba)
{
LOG_E("GPT: first_usable_lba incorrect: %ld > %ld",
((uint64_t)((*gpt)->first_usable_lba)),
(size_t)lastlba);
goto fail;
}
if ((uint64_t)((*gpt)->last_usable_lba) > lastlba)
{
LOG_E("GPT: last_usable_lba incorrect: %ld > %ld",
(uint64_t)((*gpt)->last_usable_lba),
(size_t)lastlba);
goto fail;
}
if ((uint64_t)((*gpt)->last_usable_lba) < (uint64_t)((*gpt)->first_usable_lba))
{
LOG_E("GPT: last_usable_lba incorrect: %ld > %ld",
(uint64_t)((*gpt)->last_usable_lba),
(uint64_t)((*gpt)->first_usable_lba));
goto fail;
}
/* Check that sizeof_partition_entry has the correct value */
if ((uint32_t)((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
LOG_E("GUID Partition Entry Size check failed.");
goto fail;
}
*ptes = alloc_read_gpt_entries(card, *gpt);
if (!(*ptes))
{
goto fail;
}
/* We're done, all's well */
return 1;
fail:
rt_free(*gpt);
*gpt = RT_NULL;
return 0;
}
/**
* is_pte_valid() - tests one PTE for validity
* pte:pte to check
* lastlba: last lba of the disk
*
* Description: returns 1 if valid, 0 on error.
*/
static inline int is_pte_valid(const gpt_entry *pte, const size_t lastlba)
{
if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
(uint64_t)(pte->starting_lba) > lastlba ||
(uint64_t)(pte->ending_lba) > lastlba)
{
return 0;
}
return 1;
}
/**
* compare_gpts() - Search disk for valid GPT headers and PTEs
* pgpt: primary GPT header
* agpt: alternate GPT header
* lastlba: last LBA number
*
* Description: Returns nothing. Sanity checks pgpt and agpt fields
* and prints warnings on discrepancies.
*
*/
static void compare_gpts(gpt_header *pgpt, gpt_header *agpt, size_t lastlba)
{
int error_found = 0;
if (!pgpt || !agpt)
{
return;
}
if ((uint64_t)(pgpt->start_lba) != (uint64_t)(agpt->alternate_lba))
{
LOG_I("GPT:Primary header LBA != Alt. header alternate_lba");
LOG_I("GPT:%lld != %lld",
(uint64_t)(pgpt->start_lba),
(uint64_t)(agpt->alternate_lba));
error_found++;
}
if ((uint64_t)(pgpt->alternate_lba) != (uint64_t)(agpt->start_lba))
{
LOG_I("GPT:Primary header alternate_lba != Alt. header start_lba");
LOG_I("GPT:%lld != %lld",
(uint64_t)(pgpt->alternate_lba),
(uint64_t)(agpt->start_lba));
error_found++;
}
if ((uint64_t)(pgpt->first_usable_lba) != (uint64_t)(agpt->first_usable_lba))
{
LOG_I("GPT:first_usable_lbas don't match.");
LOG_I("GPT:%lld != %lld",
(uint64_t)(pgpt->first_usable_lba),
(uint64_t)(agpt->first_usable_lba));
error_found++;
}
if ((uint64_t)(pgpt->last_usable_lba) != (uint64_t)(agpt->last_usable_lba))
{
LOG_I("GPT:last_usable_lbas don't match.");
LOG_I("GPT:%lld != %lld",
(uint64_t)(pgpt->last_usable_lba),
(uint64_t)(agpt->last_usable_lba));
error_found++;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid))
{
LOG_I("GPT:disk_guids don't match.");
error_found++;
}
if ((pgpt->num_partition_entries) != (agpt->num_partition_entries))
{
LOG_I("GPT:num_partition_entries don't match: "
"0x%x != 0x%x",
(pgpt->num_partition_entries),
(agpt->num_partition_entries));
error_found++;
}
if ((pgpt->sizeof_partition_entry) != (agpt->sizeof_partition_entry))
{
LOG_I("GPT:sizeof_partition_entry values don't match: "
"0x%x != 0x%x",
(pgpt->sizeof_partition_entry),
(agpt->sizeof_partition_entry));
error_found++;
}
if ((pgpt->partition_entry_array_crc32) != (agpt->partition_entry_array_crc32))
{
LOG_I("GPT:partition_entry_array_crc32 values don't match: "
"0x%x != 0x%x",
(pgpt->partition_entry_array_crc32),
(agpt->partition_entry_array_crc32));
error_found++;
}
if ((pgpt->alternate_lba) != lastlba)
{
LOG_I("GPT:Primary header thinks Alt. header is not at the end of the disk.");
LOG_I("GPT:%lld != %lld",
(uint64_t)(pgpt->alternate_lba),
(size_t)lastlba);
error_found++;
}
if ((agpt->start_lba) != lastlba)
{
LOG_I("GPT:Alternate GPT header not at the end of the disk.");
LOG_I("GPT:%lld != %lld",
(uint64_t)(agpt->start_lba),
(size_t)lastlba);
error_found++;
}
if (error_found)
{
LOG_I("GPT: Use GNU Parted to correct GPT errors.");
}
return;
}
/**
* find_valid_gpt() - Search disk for valid GPT headers and PTEs
* state: disk parsed partitions
* gpt: GPT header ptr, filled on return.
* ptes: PTEs ptr, filled on return.
*
* Description: Returns 1 if valid, 0 on error.
* If valid, returns pointers to newly allocated GPT header and PTEs.
* Validity depends on PMBR being valid (or being overridden by the
* 'gpt' kernel command line option) and finding either the Primary
* GPT header and PTEs valid, or the Alternate GPT header and PTEs
* valid. If the Primary GPT header is not valid, the Alternate GPT header
* is not checked unless the 'gpt' kernel command line option is passed.
* This protects against devices which misreport their size, and forces
* the user to decide to use the Alternate GPT.
*/
static int find_valid_gpt(struct rt_mmcsd_card *card, gpt_header **gpt,
gpt_entry **ptes)
{
int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
gpt_header *pgpt = RT_NULL, *agpt = RT_NULL;
gpt_entry *pptes = RT_NULL, *aptes = RT_NULL;
legacy_mbr *legacymbr;
size_t total_sectors = last_lba(card) + 1;
size_t lastlba;
int status = 0;
if (!ptes)
{
return 0;
}
lastlba = last_lba(card);
if (!force_gpt)
{
/* This will be added to the EFI Spec. per Intel after v1.02. */
legacymbr = rt_malloc(512);
if (!legacymbr)
{
goto fail;
}
status = read_lba(card, 0, (uint8_t *)legacymbr, 1);
if (status)
{
LOG_I("status:%d", status);
goto fail;
}
good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
rt_free(legacymbr);
if (!good_pmbr)
{
goto fail;
}
rt_kprintf("Device has a %s MBR\n",
good_pmbr == GPT_MBR_PROTECTIVE ?
"protective" : "hybrid");
}
good_pgpt = is_gpt_valid(card, GPT_PRIMARY_PARTITION_TABLE_LBA,
&pgpt, &pptes);
if (good_pgpt)
{
good_agpt = is_gpt_valid(card, (pgpt->alternate_lba), &agpt, &aptes);
if (!good_agpt && force_gpt)
{
good_agpt = is_gpt_valid(card, lastlba, &agpt, &aptes);
}
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
{
goto fail;
}
compare_gpts(pgpt, agpt, lastlba);
/* The good cases */
if (good_pgpt)
{
*gpt = pgpt;
*ptes = pptes;
rt_free(agpt);
rt_free(aptes);
if (!good_agpt)
{
LOG_D("Alternate GPT is invalid, using primary GPT.");
}
return 1;
}
else if (good_agpt)
{
*gpt = agpt;
*ptes = aptes;
rt_free(pgpt);
rt_free(pptes);
LOG_D("Primary GPT is invalid, using alternate GPT.");
return 1;
}
}
fail:
rt_free(pgpt);
rt_free(agpt);
rt_free(pptes);
rt_free(aptes);
*gpt = RT_NULL;
*ptes = RT_NULL;
return 0;
}
int check_gpt(struct rt_mmcsd_card *card)
{
if (!find_valid_gpt(card, &_gpt, &_ptes) || !_gpt || !_ptes)
{
rt_free(_gpt);
rt_free(_ptes);
return MBR_TYPE;
}
return GPT_TYPE;
}
int gpt_get_partition_param(struct rt_mmcsd_card *card, struct dfs_partition *part, uint32_t pindex)
{
if (!is_pte_valid(&_ptes[pindex], last_lba(card)))
{
return -1;
}
part->offset = (off_t)(_ptes[pindex].starting_lba);
part->size = (_ptes[pindex].ending_lba) - (_ptes[pindex].starting_lba) + 1ULL;
rt_kprintf("found part[%d], begin(sector): %d, end(sector):%d size: ",
pindex, _ptes[pindex].starting_lba, _ptes[pindex].ending_lba);
if ((part->size >> 11) == 0)
{
rt_kprintf("%d%s", part->size >> 1, "KB\n"); /* KB */
}
else
{
unsigned int part_size;
part_size = part->size >> 11; /* MB */
if ((part_size >> 10) == 0)
rt_kprintf("%d.%d%s", part_size, (part->size >> 1) & 0x3FF, "MB\n");
else
rt_kprintf("%d.%d%s", part_size >> 10, part_size & 0x3FF, "GB\n");
}
return 0;
}
void gpt_free(void)
{
rt_free(_ptes);
rt_free(_gpt);
}