[DM/FEATURE] Support simple block layer

1. Disk and block device management (see the registration sketch below).
2. Support automatic partition probing.
3. Support DFS and user-mode fops and ioctl.
4. Add a command to list block device information.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
GuEe-GUI 2024-10-30 10:44:16 +08:00 committed by Rbb666
parent 10cac76d3b
commit c424cb8186
17 changed files with 2472 additions and 0 deletions
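
For driver authors, here is a minimal sketch of how a disk plugs into this layer. It assumes a build without RT_USING_DM (with RT_USING_DM the driver would also set disk->ida and name the device through rt_dm_dev_set_name()); the "ramd" name, the RAM backing and the sizes are illustrative only and not part of this commit. Bounds checking is omitted for brevity.

#include <rtthread.h>
#include <drivers/blk.h>

#define RAMDISK_SSIZE   512
#define RAMDISK_SECTORS 2048 /* 1 MiB of backing RAM (illustrative) */

static rt_uint8_t ramdisk_data[RAMDISK_SSIZE * RAMDISK_SECTORS];
static struct rt_blk_disk ramdisk;

static rt_ssize_t ramdisk_read(struct rt_blk_disk *disk, rt_off_t sector,
        void *buffer, rt_size_t sector_count)
{
    rt_memcpy(buffer, &ramdisk_data[sector * RAMDISK_SSIZE],
            sector_count * RAMDISK_SSIZE);
    return sector_count;
}

static rt_ssize_t ramdisk_write(struct rt_blk_disk *disk, rt_off_t sector,
        const void *buffer, rt_size_t sector_count)
{
    rt_memcpy(&ramdisk_data[sector * RAMDISK_SSIZE], buffer,
            sector_count * RAMDISK_SSIZE);
    return sector_count;
}

static rt_err_t ramdisk_getgeome(struct rt_blk_disk *disk,
        struct rt_device_blk_geometry *geometry)
{
    geometry->bytes_per_sector = RAMDISK_SSIZE;
    geometry->block_size = RAMDISK_SSIZE;
    geometry->sector_count = RAMDISK_SECTORS;
    return RT_EOK;
}

static const struct rt_blk_disk_ops ramdisk_ops =
{
    .read     = ramdisk_read,
    .write    = ramdisk_write,
    .getgeome = ramdisk_getgeome,
};

static int ramdisk_init(void)
{
    /* The disk name must be set before registering (see rt_hw_blk_disk_register) */
    rt_strncpy(ramdisk.parent.parent.name, "ramd", RT_NAME_MAX);
    ramdisk.ops = &ramdisk_ops;
    ramdisk.max_partitions = RT_BLK_PARTITION_MAX;

    /* Registration also probes partitions */
    return rt_hw_blk_disk_register(&ramdisk);
}
INIT_DEVICE_EXPORT(ramdisk_init);

Because the backing memory is zero-filled, neither the DFS nor the EFI parser finds a partition table, so rt_blk_disk_probe_partition() falls back to a single whole-disk partition that shows up as "ramd0".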

View File

@ -21,6 +21,7 @@ rsource "touch/Kconfig"
rsource "graphic/Kconfig"
rsource "hwcrypto/Kconfig"
rsource "wlan/Kconfig"
rsource "block/Kconfig"
rsource "virtio/Kconfig"
rsource "mfd/Kconfig"
rsource "ofw/Kconfig"

View File

@ -0,0 +1,7 @@
menuconfig RT_USING_BLK
bool "Using Block device drivers"
default n
if RT_USING_BLK
rsource "partitions/Kconfig"
endif

View File

@ -0,0 +1,23 @@
from building import *
group = []
objs = []
if not GetDepend(['RT_USING_BLK']):
Return('group')
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../include']
src = ['blk.c', 'blk_dev.c', 'blk_dfs.c', 'blk_partition.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')

View File

@ -0,0 +1,569 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#define DBG_TAG "rtdm.blk"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "blk_dev.h"
#include "blk_dfs.h"
static void blk_remove_all(struct rt_blk_disk *disk)
{
struct rt_blk_device *blk, *blk_next;
/* Remove all partitions */
rt_list_for_each_entry_safe(blk, blk_next, &disk->part_nodes, list)
{
disk_remove_blk_dev(blk, RT_TRUE);
}
}
static rt_err_t blk_open(rt_device_t dev, rt_uint16_t oflag)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
if (disk->read_only && (oflag & RT_DEVICE_OFLAG_WRONLY))
{
return -RT_EINVAL;
}
return RT_EOK;
}
static rt_err_t blk_close(rt_device_t dev)
{
return RT_EOK;
}
static rt_ssize_t blk_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
rt_ssize_t res;
struct rt_blk_disk *disk = to_blk_disk(dev);
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
res = disk->ops->read(disk, sector, buffer, sector_count);
rt_sem_release(&disk->usr_lock);
return res;
}
static rt_ssize_t blk_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
rt_ssize_t res;
struct rt_blk_disk *disk = to_blk_disk(dev);
if (!disk->read_only)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
res = disk->ops->write(disk, sector, buffer, sector_count);
rt_sem_release(&disk->usr_lock);
return res;
}
return -RT_ENOSYS;
}
static rt_ssize_t blk_parallel_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
return disk->ops->read(disk, sector, buffer, sector_count);
}
static rt_ssize_t blk_parallel_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
struct rt_blk_disk *disk = to_blk_disk(dev);
if (!disk->read_only)
{
return disk->ops->write(disk, sector, buffer, sector_count);
}
return -RT_ENOSYS;
}
static rt_err_t blk_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t err;
struct rt_blk_disk *disk = to_blk_disk(dev);
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
if (args)
{
err = disk->ops->getgeome(disk, args);
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SYNC:
if (disk->ops->sync)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
spin_lock(&disk->lock);
err = disk->ops->sync(disk);
spin_unlock(&disk->lock);
rt_sem_release(&disk->usr_lock);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_ERASE:
if (disk->ops->erase)
{
rt_sem_take(&disk->usr_lock, RT_WAITING_FOREVER);
spin_lock(&disk->lock);
if (disk->parent.ref_count != 1)
{
err = -RT_EBUSY;
goto _unlock;
}
blk_remove_all(disk);
err = disk->ops->erase(disk);
_unlock:
spin_unlock(&disk->lock);
rt_sem_release(&disk->usr_lock);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_AUTOREFRESH:
if (disk->ops->autorefresh)
{
err = disk->ops->autorefresh(disk, !!args);
}
else
{
err = -RT_ENOSYS;
}
break;
case RT_DEVICE_CTRL_BLK_PARTITION:
err = -RT_EINVAL;
break;
case RT_DEVICE_CTRL_BLK_SSIZEGET:
device_get_blk_ssize(dev, args);
err = RT_EOK;
break;
case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
device_get_all_blk_ssize(dev, args);
err = RT_EOK;
break;
default:
if (disk->ops->control)
{
err = disk->ops->control(disk, RT_NULL, cmd, args);
}
break;
}
return err;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops blk_ops =
{
.open = blk_open,
.close = blk_close,
.read = blk_read,
.write = blk_write,
.control = blk_control,
};
const static struct rt_device_ops blk_parallel_ops =
{
.open = blk_open,
.close = blk_close,
.read = blk_parallel_read,
.write = blk_parallel_write,
.control = blk_control,
};
#endif /* RT_USING_DEVICE_OPS */
rt_err_t rt_hw_blk_disk_register(struct rt_blk_disk *disk)
{
rt_err_t err;
#ifdef RT_USING_DM
int device_id;
#endif
const char *disk_name;
rt_uint16_t flags = RT_DEVICE_FLAG_RDONLY;
if (!disk || !disk->ops)
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if (!disk->ida)
{
return -RT_EINVAL;
}
#endif
#if RT_NAME_MAX > 0
if (disk->parent.parent.name[0] == '\0')
#else
if (disk->parent.parent.name)
#endif
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if ((device_id = rt_dm_ida_alloc(disk->ida)) < 0)
{
return -RT_EFULL;
}
#endif
disk->__magic = RT_BLK_DISK_MAGIC;
disk_name = to_disk_name(disk);
err = rt_sem_init(&disk->usr_lock, disk_name, 1, RT_IPC_FLAG_PRIO);
if (err)
{
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, device_id);
#endif
LOG_E("%s: Init user mutex error = %s", rt_strerror(err));
return err;
}
rt_list_init(&disk->part_nodes);
rt_spin_lock_init(&disk->lock);
disk->parent.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
if (disk->parallel_io)
{
disk->parent.ops = &blk_parallel_ops;
}
else
{
disk->parent.ops = &blk_ops;
}
#else
disk->parent.open = blk_open;
disk->parent.close = blk_close;
if (disk->parallel_io)
{
disk->parent.read = blk_parallel_read;
disk->parent.write = blk_parallel_write;
}
else
{
disk->parent.read = blk_read;
disk->parent.write = blk_write;
}
disk->parent.control = blk_control;
#endif
if (!disk->ops->write)
{
disk->read_only = RT_TRUE;
}
if (!disk->read_only)
{
flags |= RT_DEVICE_FLAG_WRONLY;
}
#ifdef RT_USING_DM
disk->parent.master_id = disk->ida->master_id;
disk->parent.device_id = device_id;
#endif
device_set_blk_fops(&disk->parent);
err = rt_device_register(&disk->parent, disk_name, flags);
if (err)
{
rt_sem_detach(&disk->usr_lock);
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, device_id);
#endif
return err;
}
/* Ignore partition scanning errors */
rt_blk_disk_probe_partition(disk);
return err;
}
rt_err_t rt_hw_blk_disk_unregister(struct rt_blk_disk *disk)
{
rt_err_t err;
if (!disk)
{
return -RT_EINVAL;
}
spin_lock(&disk->lock);
if (disk->parent.ref_count != 1)
{
err = -RT_EBUSY;
goto _unlock;
}
/* Flush all data */
if (disk->ops->sync)
{
err = disk->ops->sync(disk);
if (err)
{
LOG_E("%s: Sync error = %s", to_disk_name(disk), rt_strerror(err));
goto _unlock;
}
}
rt_sem_detach(&disk->usr_lock);
blk_remove_all(disk);
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, disk->parent.device_id);
#endif
err = rt_device_unregister(&disk->parent);
_unlock:
spin_unlock(&disk->lock);
return err;
}
rt_ssize_t rt_blk_disk_get_capacity(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct rt_device_blk_geometry geometry;
if (!disk)
{
return -RT_EINVAL;
}
res = disk->ops->getgeome(disk, &geometry);
if (!res)
{
return geometry.sector_count;
}
return res;
}
rt_ssize_t rt_blk_disk_get_logical_block_size(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct rt_device_blk_geometry geometry;
if (!disk)
{
return -RT_EINVAL;
}
res = disk->ops->getgeome(disk, &geometry);
if (!res)
{
return geometry.bytes_per_sector;
}
return res;
}
#ifdef RT_USING_DFS_MNTTABLE
static int blk_dfs_mnt_table(void)
{
rt_ubase_t level;
struct rt_object *obj;
struct rt_device *dev;
struct rt_blk_disk *disk;
struct rt_blk_device *blk_dev;
struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Device);
level = rt_hw_interrupt_disable();
rt_list_for_each_entry(obj, &info->object_list, list)
{
dev = rt_container_of(obj, struct rt_device, parent);
if (dev->type != RT_Device_Class_Block)
{
continue;
}
disk = to_blk_disk(dev);
if (disk->__magic != RT_BLK_DISK_MAGIC)
{
continue;
}
if (disk->max_partitions == RT_BLK_PARTITION_NONE)
{
dfs_mount_device(&disk->parent);
continue;
}
rt_list_for_each_entry(blk_dev, &disk->part_nodes, list)
{
dfs_mount_device(&blk_dev->parent);
}
}
rt_hw_interrupt_enable(level);
return 0;
}
INIT_ENV_EXPORT(blk_dfs_mnt_table);
#endif /* RT_USING_DFS_MNTTABLE */
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
const char *convert_size(struct rt_device_blk_geometry *geome,
rt_size_t sector_count, rt_size_t *out_cap, rt_size_t *out_minor)
{
rt_size_t cap, minor = 0;
int size_index = 0;
const char *size_name[] = { "B", "K", "M", "G", "T", "P", "E" };
cap = geome->bytes_per_sector * sector_count;
for (size_index = 0; size_index < RT_ARRAY_SIZE(size_name) - 1; ++size_index)
{
if (cap < 1024)
{
break;
}
/* Only one decimal point */
minor = (cap % 1024) * 10 / 1024;
cap = cap / 1024;
}
*out_cap = cap;
*out_minor = minor;
return size_name[size_index];
}
static int list_blk(int argc, char**argv)
{
rt_ubase_t level;
rt_size_t cap, minor;
const char *size_name;
struct rt_object *obj;
struct rt_device *dev;
struct rt_blk_disk *disk;
struct rt_blk_device *blk_dev;
struct rt_device_blk_geometry geome;
struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Device);
level = rt_hw_interrupt_disable();
rt_kprintf("%-*.s MAJ:MIN RM SIZE\tRO TYPE MOUNTPOINT\n", RT_NAME_MAX, "NAME");
rt_list_for_each_entry(obj, &info->object_list, list)
{
dev = rt_container_of(obj, struct rt_device, parent);
if (dev->type != RT_Device_Class_Block)
{
continue;
}
disk = to_blk_disk(dev);
if (disk->__magic != RT_BLK_DISK_MAGIC)
{
continue;
}
if (disk->ops->getgeome(disk, &geome))
{
continue;
}
size_name = convert_size(&geome, geome.sector_count, &cap, &minor);
rt_kprintf("%-*.s %3u.%-3u %u %u.%u%s\t%u disk %s\n",
RT_NAME_MAX, to_disk_name(disk),
#ifdef RT_USING_DM
disk->parent.master_id, disk->parent.device_id,
#else
0, 0,
#endif
disk->removable, cap, minor, size_name, disk->read_only,
disk->max_partitions != RT_BLK_PARTITION_NONE ? "\b" :
(dfs_filesystem_get_mounted_path(&disk->parent) ? : "\b"));
rt_list_for_each_entry(blk_dev, &disk->part_nodes, list)
{
size_name = convert_size(&geome, blk_dev->sector_count, &cap, &minor);
rt_kprintf("%c--%-*.s %3u.%-3u %u %u.%u%s\t%u part %s\n",
blk_dev->list.next != &disk->part_nodes ? '|' : '`',
RT_NAME_MAX - 3, to_blk_name(blk_dev),
#ifdef RT_USING_DM
blk_dev->parent.master_id, blk_dev->parent.device_id,
#else
0, 0,
#endif
disk->removable, cap, minor, size_name, disk->read_only,
dfs_filesystem_get_mounted_path(&blk_dev->parent) ? : "");
}
}
rt_hw_interrupt_enable(level);
return 0;
}
MSH_CMD_EXPORT(list_blk, dump all block device information);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */

View File

@ -0,0 +1,297 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#include "blk_dev.h"
#include "blk_dfs.h"
#define DBG_TAG "blk.dm"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_DFS
#include <dfs_fs.h>
#endif
static rt_err_t blk_dev_open(rt_device_t dev, rt_uint16_t oflag)
{
struct rt_blk_device *blk = to_blk(dev);
return rt_device_open(&blk->disk->parent, oflag);
}
static rt_err_t blk_dev_close(rt_device_t dev)
{
struct rt_blk_device *blk = to_blk(dev);
return rt_device_close(&blk->disk->parent);
}
static rt_ssize_t blk_dev_read(rt_device_t dev, rt_off_t sector,
void *buffer, rt_size_t sector_count)
{
struct rt_blk_device *blk = to_blk(dev);
if (sector <= blk->sector_start + blk->sector_count &&
sector_count <= blk->sector_count)
{
return rt_device_read(&blk->disk->parent,
blk->sector_start + sector, buffer, sector_count);
}
return -RT_EINVAL;
}
static rt_ssize_t blk_dev_write(rt_device_t dev, rt_off_t sector,
const void *buffer, rt_size_t sector_count)
{
struct rt_blk_device *blk = to_blk(dev);
if (sector <= blk->sector_start + blk->sector_count &&
sector_count <= blk->sector_count)
{
return rt_device_write(&blk->disk->parent,
blk->sector_start + sector, buffer, sector_count);
}
return -RT_EINVAL;
}
static rt_err_t blk_dev_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t err = -RT_EINVAL;
struct rt_blk_device *blk = to_blk(dev);
struct rt_blk_disk *disk = blk->disk;
struct rt_device_blk_geometry disk_geometry, *geometry;
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
if ((geometry = args))
{
if (!(err = disk->ops->getgeome(disk, &disk_geometry)))
{
geometry->bytes_per_sector = disk_geometry.bytes_per_sector;
geometry->block_size = disk_geometry.block_size;
geometry->sector_count = blk->sector_count;
}
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SYNC:
rt_device_control(&disk->parent, cmd, args);
break;
case RT_DEVICE_CTRL_BLK_ERASE:
case RT_DEVICE_CTRL_BLK_AUTOREFRESH:
if (disk->partitions <= 1)
{
rt_device_control(&disk->parent, cmd, args);
}
else
{
err = -RT_EIO;
}
break;
case RT_DEVICE_CTRL_BLK_PARTITION:
if (args)
{
rt_memcpy(args, &blk->partition, sizeof(blk->partition));
}
else
{
err = -RT_EINVAL;
}
break;
case RT_DEVICE_CTRL_BLK_SSIZEGET:
device_get_blk_ssize(dev, args);
err = RT_EOK;
break;
case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
device_get_all_blk_ssize(dev, args);
err = RT_EOK;
break;
default:
if (disk->ops->control)
{
err = disk->ops->control(disk, blk, cmd, args);
}
break;
}
return err;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops blk_dev_ops =
{
.open = blk_dev_open,
.close = blk_dev_close,
.read = blk_dev_read,
.write = blk_dev_write,
.control = blk_dev_control,
};
#endif
rt_err_t blk_dev_initialize(struct rt_blk_device *blk)
{
struct rt_device *dev;
if (!blk)
{
return -RT_EINVAL;
}
dev = &blk->parent;
dev->type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
dev->ops = &blk_dev_ops;
#else
dev->open = blk_dev_open;
dev->close = blk_dev_close;
dev->read = blk_dev_read;
dev->write = blk_dev_write;
dev->control = blk_dev_control;
#endif
return RT_EOK;
}
rt_err_t disk_add_blk_dev(struct rt_blk_disk *disk, struct rt_blk_device *blk)
{
rt_err_t err;
#ifdef RT_USING_DM
int device_id;
#endif
const char *disk_name, *name_fmt;
if (!disk || !blk)
{
return -RT_EINVAL;
}
#ifdef RT_USING_DM
if ((device_id = rt_dm_ida_alloc(disk->ida)) < 0)
{
return -RT_EFULL;
}
#endif
blk->disk = disk;
rt_list_init(&blk->list);
disk_name = to_disk_name(disk);
/* End is [a-zA-Z] or [0-9] */
if (disk_name[rt_strlen(disk_name) - 1] < 'a')
{
name_fmt = "%sp%d";
}
else
{
name_fmt = "%s%d";
}
#ifdef RT_USING_DM
rt_dm_dev_set_name(&blk->parent, name_fmt, disk_name, blk->partno);
blk->parent.master_id = disk->ida->master_id;
blk->parent.device_id = device_id;
#else
rt_snprintf(blk->parent.parent.name, RT_NAME_MAX, name_fmt, disk_name, blk->partno);
#endif
device_set_blk_fops(&blk->parent);
err = rt_device_register(&blk->parent, to_blk_name(blk),
disk->parent.flag & RT_DEVICE_FLAG_RDWR);
if (err)
{
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, device_id);
#endif
return err;
}
spin_lock(&disk->lock);
rt_list_insert_before(&disk->part_nodes, &blk->list);
spin_unlock(&disk->lock);
return RT_EOK;
}
rt_err_t disk_remove_blk_dev(struct rt_blk_device *blk, rt_bool_t lockless)
{
struct rt_blk_disk *disk;
if (!blk)
{
return -RT_EINVAL;
}
disk = blk->disk;
if (!disk)
{
return -RT_EINVAL;
}
else
{
#ifdef RT_USING_DFS
const char *mountpath;
if ((mountpath = dfs_filesystem_get_mounted_path(&blk->parent)))
{
dfs_unmount(mountpath);
LOG_D("%s: Unmount file system on %s",
to_blk_name(blk), mountpath);
}
#endif
}
#ifdef RT_USING_DM
rt_dm_ida_free(disk->ida, blk->parent.device_id);
#endif
rt_device_unregister(&blk->parent);
if (!lockless)
{
spin_lock(&disk->lock);
}
rt_list_remove(&blk->list);
if (!lockless)
{
spin_unlock(&disk->lock);
}
--disk->partitions;
return RT_EOK;
}
rt_uint32_t blk_request_ioprio(void)
{
struct rt_thread *task = rt_thread_self();
return task ? RT_SCHED_PRIV(task).current_priority : 0;
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_DEV_H__
#define __BLK_DEV_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/blk.h>
#include <drivers/misc.h>
#define to_blk_disk(dev) rt_container_of(dev, struct rt_blk_disk, parent)
#define to_blk(dev) rt_container_of(dev, struct rt_blk_device, parent)
#ifdef RT_USING_DM
#define to_disk_name(disk) rt_dm_dev_get_name(&(disk)->parent)
#define to_blk_name(blk) rt_dm_dev_get_name(&(blk)->parent)
#else
#define to_disk_name(disk) (disk)->parent.parent.name
#define to_blk_name(blk) (blk)->parent.parent.name
#endif
/* Disk name suffix for a "%c%c" format: 0..25 gives a single letter 'a'..'z' (second char '\0'), larger indexes give a two-letter suffix */
#define letter_name(n) ('a' + (n) / ((n) >= 26 ? (26 * 2) : 1)), ((n) >= 26 ? 'a' + (n) % 26 : '\0')
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
rt_hw_spin_lock(&spinlock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
rt_hw_spin_unlock(&spinlock->lock);
}
rt_err_t blk_dev_initialize(struct rt_blk_device *blk);
rt_err_t disk_add_blk_dev(struct rt_blk_disk *disk, struct rt_blk_device *blk);
rt_err_t disk_remove_blk_dev(struct rt_blk_device *blk, rt_bool_t lockless);
rt_uint32_t blk_request_ioprio(void);
#endif /* __BLK_DEV_H__ */

View File

@ -0,0 +1,274 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-08 GuEe-GUI first version
*/
#include "blk_dfs.h"
#include <dfs_file.h>
#include <drivers/classes/block.h>
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_DFS_V2)
struct blk_fops_data
{
struct rt_device_blk_geometry geometry;
};
static int blk_fops_open(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = rt_malloc(sizeof(*data));
if (!data)
{
return (int)-RT_ENOMEM;
}
dev->user_data = data;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &data->geometry);
rt_device_control(dev, RT_DEVICE_CTRL_ALL_BLK_SSIZEGET, &file->vnode->size);
return 0;
}
static int blk_fops_close(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
rt_free(dev->user_data);
dev->user_data = RT_NULL;
return 0;
}
static int blk_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
struct rt_device *dev = file->vnode->data;
return (int)rt_device_control(dev, cmd, arg);
}
static ssize_t blk_fops_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
void *rbuf;
rt_ssize_t res = 0;
int bytes_per_sector, blk_pos, first_offs, rsize = 0;
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = dev->user_data;
bytes_per_sector = data->geometry.bytes_per_sector;
blk_pos = *pos / bytes_per_sector;
first_offs = *pos % bytes_per_sector;
if ((rbuf = rt_malloc(bytes_per_sector)))
{
/*
** #1: read first unalign block size.
*/
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
if (count > bytes_per_sector - first_offs)
{
rsize = bytes_per_sector - first_offs;
}
else
{
rsize = count;
}
rt_memcpy(buf, rbuf + first_offs, rsize);
++blk_pos;
/*
** #2: read continuous block size.
*/
while (rsize < count)
{
res = rt_device_read(dev, blk_pos++, rbuf, 1);
if (res != 1)
{
break;
}
if (count - rsize >= bytes_per_sector)
{
rt_memcpy(buf + rsize, rbuf, bytes_per_sector);
rsize += bytes_per_sector;
}
else
{
rt_memcpy(buf + rsize, rbuf, count - rsize);
rsize = count;
}
}
*pos += rsize;
}
rt_free(rbuf);
}
return rsize;
}
static ssize_t blk_fops_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
void *rbuf;
rt_ssize_t res = 0;
int bytes_per_sector, blk_pos, first_offs, wsize = 0;
struct rt_device *dev = file->vnode->data;
struct blk_fops_data *data = dev->user_data;
bytes_per_sector = data->geometry.bytes_per_sector;
blk_pos = *pos / bytes_per_sector;
first_offs = *pos % bytes_per_sector;
/*
** #1: write first unalign block size.
*/
if (first_offs != 0)
{
if (count > bytes_per_sector - first_offs)
{
wsize = bytes_per_sector - first_offs;
}
else
{
wsize = count;
}
if ((rbuf = rt_malloc(bytes_per_sector)))
{
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
rt_memcpy(rbuf + first_offs, buf, wsize);
res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);
if (res == 1)
{
blk_pos += 1;
rt_free(rbuf);
goto _goon;
}
}
rt_free(rbuf);
}
return 0;
}
_goon:
/*
** #2: write continuous block size.
*/
if ((count - wsize) / bytes_per_sector != 0)
{
res = rt_device_write(dev, blk_pos, buf + wsize, (count - wsize) / bytes_per_sector);
wsize += res * bytes_per_sector;
blk_pos += res;
if (res != (count - wsize) / bytes_per_sector)
{
*pos += wsize;
return wsize;
}
}
/*
** # 3: write last unalign block size.
*/
if ((count - wsize) != 0)
{
if ((rbuf = rt_malloc(bytes_per_sector)))
{
res = rt_device_read(dev, blk_pos, rbuf, 1);
if (res == 1)
{
rt_memcpy(rbuf, buf + wsize, count - wsize);
res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);
if (res == 1)
{
wsize += count - wsize;
}
}
rt_free(rbuf);
}
}
*pos += wsize;
return wsize;
}
static int blk_fops_flush(struct dfs_file *file)
{
struct rt_device *dev = file->vnode->data;
return (int)rt_device_control(dev, RT_DEVICE_CTRL_BLK_SYNC, RT_NULL);
}
static int blk_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
int mask = 0;
return mask;
}
const static struct dfs_file_ops blk_fops =
{
.open = blk_fops_open,
.close = blk_fops_close,
.ioctl = blk_fops_ioctl,
.read = blk_fops_read,
.write = blk_fops_write,
.flush = blk_fops_flush,
.lseek = generic_dfs_lseek,
.poll = blk_fops_poll
};
void device_set_blk_fops(struct rt_device *dev)
{
dev->fops = &blk_fops;
}
#else
void device_set_blk_fops(struct rt_device *dev)
{
}
#endif /* RT_USING_POSIX_DEVIO && RT_USING_DFS_V2 */
void device_get_blk_ssize(struct rt_device *dev, void *args)
{
rt_uint32_t bytes_per_sector;
struct rt_device_blk_geometry geometry;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
bytes_per_sector = geometry.bytes_per_sector;
RT_ASSERT(sizeof(bytes_per_sector) == sizeof(geometry.bytes_per_sector));
rt_memcpy(args, &bytes_per_sector, sizeof(bytes_per_sector));
}
void device_get_all_blk_ssize(struct rt_device *dev, void *args)
{
rt_uint64_t count_mul_per;
struct rt_device_blk_geometry geometry;
rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
count_mul_per = geometry.bytes_per_sector * geometry.sector_count;
rt_memcpy(args, &count_mul_per, sizeof(count_mul_per));
}

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-08 GuEe-GUI first version
*/
#ifndef __BLK_DFS_H__
#define __BLK_DFS_H__
#include <rtdef.h>
#define RT_DEVICE_CTRL_BLK_SSIZEGET 0x00001268 /**< get number of bytes per sector */
#define RT_DEVICE_CTRL_ALL_BLK_SSIZEGET 0x80081272 /**< get number of bytes per sector * sector counts */
void device_set_blk_fops(struct rt_device *dev);
void device_get_blk_ssize(struct rt_device *dev, void *args);
void device_get_all_blk_ssize(struct rt_device *dev, void *args);
#endif /* __BLK_DFS_H__ */
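
As a user-space counterpart to the two ioctl commands defined above, here is a hedged sketch of querying a block device node through the fops path that device_set_blk_fops() installs. It assumes RT_USING_POSIX_DEVIO with DFS v2 and the hypothetical device path "/dev/ramd0"; only commands that blk_fops_ioctl() forwards to rt_device_control() are used.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drivers/classes/block.h>

/* Query bytes-per-sector and capacity of a block device node (path is hypothetical) */
static void blk_geometry_query(void)
{
    struct rt_device_blk_geometry geom;
    int fd = open("/dev/ramd0", O_RDONLY);

    if (fd < 0)
    {
        return;
    }

    /* blk_fops_ioctl() passes the command straight to rt_device_control() */
    if (ioctl(fd, RT_DEVICE_CTRL_BLK_GETGEOME, &geom) == 0)
    {
        printf("sector size: %u, sectors: %u\n",
                geom.bytes_per_sector, (unsigned)geom.sector_count);
    }

    close(fd);
}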

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI the first version
*/
#define DBG_TAG "blk.part"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "blk_partition.h"
static rt_err_t (*partition_list[])(struct rt_blk_disk *) =
{
#ifdef RT_BLK_PARTITION_EFI
efi_partition,
#endif
#ifdef RT_BLK_PARTITION_DFS
dfs_partition,
#endif
};
rt_err_t blk_put_partition(struct rt_blk_disk *disk, const char *type,
rt_size_t start, rt_size_t count, int partno)
{
rt_err_t err;
struct rt_blk_device *blk = rt_calloc(1, sizeof(*blk));
if (type && rt_strcmp(type, "dfs"))
{
rt_uint32_t ssz = rt_blk_disk_get_logical_block_size(disk);
rt_kprintf("found part[%u], begin: %lu, size: ", partno, start * ssz);
if ((count >> 11) == 0)
{
rt_kprintf("%u%cB\n", count >> 1, 'K'); /* KB */
}
else
{
rt_uint32_t size_mb = count >> 11; /* MB */
if ((size_mb >> 10) == 0)
{
rt_kprintf("%u.%u%cB\n", size_mb, (count >> 1) & 0x3ff, 'M');
}
else
{
rt_kprintf("%u.%u%cB\n", size_mb >> 10, size_mb & 0x3ff, 'G');
}
}
}
if (!blk)
{
err = -RT_ENOMEM;
goto _fail;
}
err = blk_dev_initialize(blk);
if (err)
{
goto _fail;
}
blk->partno = partno;
blk->sector_start = start;
blk->sector_count = count;
blk->partition.offset = start;
blk->partition.size = count;
blk->partition.lock = &disk->usr_lock;
err = disk_add_blk_dev(disk, blk);
if (err)
{
goto _fail;
}
++disk->partitions;
return RT_EOK;
_fail:
LOG_E("%s: Put partition.%s[%u] start = %lu count = %lu error = %s",
to_disk_name(disk), type, partno, start, count, rt_strerror(err));
if (blk)
{
rt_free(blk);
}
return err;
}
rt_err_t rt_blk_disk_probe_partition(struct rt_blk_disk *disk)
{
rt_err_t err = RT_EOK;
if (!disk)
{
return -RT_EINVAL;
}
LOG_D("%s: Probing disk partitions", to_disk_name(disk));
if (disk->partitions)
{
return err;
}
err = -RT_EEMPTY;
if (disk->max_partitions == RT_BLK_PARTITION_NONE)
{
LOG_D("%s: Unsupported partitions", to_disk_name(disk));
return err;
}
for (int i = 0; i < RT_ARRAY_SIZE(partition_list); ++i)
{
rt_err_t part_err = partition_list[i](disk);
if (part_err == -RT_ENOMEM)
{
err = part_err;
break;
}
if (!part_err)
{
err = RT_EOK;
break;
}
}
if ((err && err != -RT_ENOMEM) || disk->partitions == 0)
{
/* No partition found */
rt_size_t total_sectors = rt_blk_disk_get_capacity(disk);
err = blk_put_partition(disk, RT_NULL, 0, total_sectors, 0);
}
return err;
}

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_PARTITION_H__
#define __BLK_PARTITION_H__
#include "blk_dev.h"
rt_err_t blk_put_partition(struct rt_blk_disk *disk, const char *type,
rt_size_t start, rt_size_t count, int partno);
rt_err_t dfs_partition(struct rt_blk_disk *disk);
rt_err_t efi_partition(struct rt_blk_disk *disk);
#endif /* __BLK_PARTITION_H__ */

View File

@ -0,0 +1,12 @@
menu "Partition Types"
config RT_BLK_PARTITION_DFS
bool "DFS Partition support"
depends on RT_USING_DFS
default y
config RT_BLK_PARTITION_EFI
bool "EFI Globally Unique Identifier (GUID) Partition support"
default y
endmenu

View File

@ -0,0 +1,18 @@
from building import *
group = []
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = []
if GetDepend(['RT_BLK_PARTITION_DFS']):
src += ['dfs.c']
if GetDepend(['RT_BLK_PARTITION_EFI']):
src += ['efi.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-07-25 weety first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#include "efi.h"
#define DBG_TAG "blk.part.dfs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
rt_err_t dfs_partition(struct rt_blk_disk *disk)
{
rt_ssize_t res;
struct dfs_partition part;
rt_uint8_t *sector = rt_malloc(rt_blk_disk_get_logical_block_size(disk));
if (!sector)
{
return -RT_ENOMEM;
}
res = disk->ops->read(disk, 0, sector, 1);
if (res < 0)
{
rt_free(sector);
return res;
}
for (rt_size_t i = 0; i < disk->max_partitions; ++i)
{
res = dfs_filesystem_get_partition(&part, sector, i);
if (res)
{
break;
}
if (blk_put_partition(disk, "dfs", part.offset, part.size, i) == -RT_ENOMEM)
{
break;
}
}
rt_free(sector);
return RT_EOK;
}

View File

@ -0,0 +1,738 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#include "efi.h"
#define DBG_TAG "blk.part.efi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static rt_bool_t force_gpt = 0;
static int force_gpt_setup(void)
{
#ifdef RT_USING_OFW
force_gpt = !!rt_ofw_bootargs_select("gpt", 0);
#endif
return 0;
}
INIT_CORE_EXPORT(force_gpt_setup);
/**
* @brief This function is the EFI version of the CRC32 function.
*
* @param buf the buffer to calculate crc32 of.
* @param len the length of buf.
* @return EFI-style CRC32 value for @buf.
*/
rt_inline rt_uint32_t efi_crc32(const rt_uint8_t *buf, rt_size_t len)
{
rt_ubase_t crc = 0xffffffffUL;
for (rt_size_t i = 0; i < len; ++i)
{
crc ^= buf[i];
for (int j = 0; j < 8; ++j)
{
crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320L : 0);
}
}
return ~crc;
}
/**
* @brief This function gets the number of the last logical block of the device.
*
* @param disk the blk of disk.
* @return last LBA value on success, 0 on error.
* This is stored (by sd and ide-geometry) in
* the part[0] entry for this disk, and is the number of
* physical sectors available on the disk.
*/
static rt_size_t last_lba(struct rt_blk_disk *disk)
{
return rt_blk_disk_get_capacity(disk) - 1ULL;
}
rt_inline int pmbr_part_valid(gpt_mbr_record *part)
{
if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
{
return 0;
}
/* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
if (rt_le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
{
return 0;
}
return GPT_MBR_PROTECTIVE;
}
/**
* @brief This function tests the Protective MBR for validity.
*
* @param mbr the pointer to a legacy mbr structure.
* @param total_sectors the amount of sectors in the device
* @return
* 0 -> Invalid MBR
* 1 -> GPT_MBR_PROTECTIVE
* 2 -> GPT_MBR_HYBRID
*/
static int is_pmbr_valid(legacy_mbr *mbr, rt_size_t total_sectors)
{
rt_uint32_t sz = 0;
int part = 0, ret = 0; /* invalid by default */
if (!mbr || rt_le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
{
goto _done;
}
for (int i = 0; i < 4; ++i)
{
ret = pmbr_part_valid(&mbr->partition_record[i]);
if (ret == GPT_MBR_PROTECTIVE)
{
part = i;
/*
* Ok, we at least know that there's a protective MBR,
* now check if there are other partition types for
* hybrid MBR.
*/
goto _check_hybrid;
}
}
if (ret != GPT_MBR_PROTECTIVE)
{
goto _done;
}
_check_hybrid:
for (int i = 0; i < 4; i++)
{
if (mbr->partition_record[i].os_type != EFI_PMBR_OSTYPE_EFI_GPT &&
mbr->partition_record[i].os_type != 0x00)
{
ret = GPT_MBR_HYBRID;
}
}
/*
* Protective MBRs take up the lesser of the whole disk
* or 2 TiB (32bit LBA), ignoring the rest of the disk.
* Some partitioning programs, nonetheless, choose to set
* the size to the maximum 32-bit limitation, disregarding
* the disk size.
*
* Hybrid MBRs do not necessarily comply with this.
*
* Consider a bad value here to be a warning to support dd'ing
* an image from a smaller disk to a larger disk.
*/
if (ret == GPT_MBR_PROTECTIVE)
{
sz = rt_le32_to_cpu(mbr->partition_record[part].size_in_lba);
if (sz != (rt_uint32_t)total_sectors - 1 && sz != 0xffffffff)
{
LOG_W("GPT: mbr size in lba (%u) different than whole disk (%u)",
sz, rt_min_t(rt_uint32_t, total_sectors - 1, 0xffffffff));
}
}
_done:
return ret;
}
/**
* @brief This function reads bytes from the disk, starting at the given LBA.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the partition table.
* @param buffer the destination buffer.
* @param count the bytes to read.
* @return number of bytes read on success, 0 on error.
*/
static rt_size_t read_lba(struct rt_blk_disk *disk,
rt_uint64_t lba, rt_uint8_t *buffer, rt_size_t count)
{
rt_size_t totalreadcount = 0;
if (!buffer || lba > last_lba(disk))
{
return 0;
}
for (rt_uint64_t n = lba; count; ++n)
{
int copied = 512;
disk->ops->read(disk, n, buffer, 1);
if (copied > count)
{
copied = count;
}
buffer += copied;
totalreadcount += copied;
count -= copied;
}
return totalreadcount;
}
/**
* @brief This function reads partition entries from disk.
*
* @param disk the blk of disk.
* @param gpt the GPT header
* @return ptes on success, null on error.
*/
static gpt_entry *alloc_read_gpt_entries(struct rt_blk_disk *disk,
gpt_header *gpt)
{
rt_size_t count;
gpt_entry *pte;
rt_uint64_t entry_lba;
if (!gpt)
{
return RT_NULL;
}
count = (rt_size_t)rt_le32_to_cpu(gpt->num_partition_entries) *
rt_le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
{
return RT_NULL;
}
pte = rt_malloc(count);
if (!pte)
{
return RT_NULL;
}
entry_lba = rt_le64_to_cpu(gpt->partition_entry_lba);
if (read_lba(disk, entry_lba, (rt_uint8_t *)pte, count) < count)
{
rt_free(pte);
pte = RT_NULL;
return RT_NULL;
}
/* Remember to free pte when done */
return pte;
}
/**
* @brief This function allocates GPT header, reads into it from disk.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the partition table
* @return GPT header on success, null on error.
*/
static gpt_header *alloc_read_gpt_header(struct rt_blk_disk *disk, rt_uint64_t lba)
{
gpt_header *gpt;
rt_uint32_t ssz = rt_blk_disk_get_logical_block_size(disk);
gpt = rt_malloc(ssz);
if (!gpt)
{
return RT_NULL;
}
if (read_lba(disk, lba, (rt_uint8_t *)gpt, ssz) < ssz)
{
rt_free(gpt);
gpt = RT_NULL;
return RT_NULL;
}
/* Remember to free gpt when finished with it */
return gpt;
}
/**
* @brief This function tests one GPT header and PTEs for validity.
*
* @param disk the blk of disk.
* @param lba the Logical Block Address of the GPT header to test.
* @param gpt the GPT header ptr, filled on return.
* @param ptes the PTEs ptr, filled on return.
* @returns true if valid, false on error.
* If valid, returns pointers to newly allocated GPT header and PTEs.
*/
static rt_bool_t is_gpt_valid(struct rt_blk_disk *disk,
rt_uint64_t lba, gpt_header **gpt, gpt_entry **ptes)
{
rt_uint32_t crc, origcrc;
rt_uint64_t lastlba, pt_size;
rt_ssize_t logical_block_size;
if (!ptes)
{
return RT_FALSE;
}
if (!(*gpt = alloc_read_gpt_header(disk, lba)))
{
return RT_FALSE;
}
/* Check the GUID Partition Table signature */
if (rt_le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE)
{
LOG_D("%s: GUID Partition Table Header signature is wrong: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->signature),
(rt_uint64_t)GPT_HEADER_SIGNATURE);
goto _fail;
}
/* Check the GUID Partition Table header size is too big */
logical_block_size = rt_blk_disk_get_logical_block_size(disk);
if (rt_le32_to_cpu((*gpt)->header_size) > logical_block_size)
{
LOG_D("%s: GUID Partition Table Header size is too large: %u > %u",
to_disk_name(disk),
rt_le32_to_cpu((*gpt)->header_size),
logical_block_size);
goto _fail;
}
/* Check the GUID Partition Table header size is too small */
if (rt_le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header))
{
LOG_D("%s: GUID Partition Table Header size is too small: %u < %u",
to_disk_name(disk),
rt_le32_to_cpu((*gpt)->header_size),
sizeof(gpt_header));
goto _fail;
}
/* Check the GUID Partition Table CRC */
origcrc = rt_le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
crc = efi_crc32((const rt_uint8_t *)(*gpt), rt_le32_to_cpu((*gpt)->header_size));
if (crc != origcrc)
{
LOG_D("%s: GUID Partition Table Header CRC is wrong: %x != %x",
to_disk_name(disk), crc, origcrc);
goto _fail;
}
(*gpt)->header_crc32 = rt_cpu_to_le32(origcrc);
/*
* Check that the start_lba entry points to the LBA that contains
* the GUID Partition Table
*/
if (rt_le64_to_cpu((*gpt)->start_lba) != lba)
{
LOG_D("%s: GPT start_lba incorrect: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->start_lba),
(rt_uint64_t)lba);
goto _fail;
}
/* Check the first_usable_lba and last_usable_lba are within the disk */
lastlba = last_lba(disk);
if (rt_le64_to_cpu((*gpt)->first_usable_lba) > lastlba)
{
LOG_D("%s: GPT: first_usable_lba incorrect: %lld > %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->first_usable_lba),
(rt_uint64_t)lastlba);
goto _fail;
}
if (rt_le64_to_cpu((*gpt)->last_usable_lba) > lastlba)
{
LOG_D("%s: GPT: last_usable_lba incorrect: %lld > %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->last_usable_lba),
(rt_uint64_t)lastlba);
goto _fail;
}
if (rt_le64_to_cpu((*gpt)->last_usable_lba) < rt_le64_to_cpu((*gpt)->first_usable_lba))
{
LOG_D("%s: GPT: last_usable_lba incorrect: %lld > %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->last_usable_lba),
(rt_uint64_t)rt_le64_to_cpu((*gpt)->first_usable_lba));
goto _fail;
}
/* Check that sizeof_partition_entry has the correct value */
if (rt_le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry))
{
LOG_D("%s: GUID Partition Entry Size check failed", to_disk_name(disk));
goto _fail;
}
/* Sanity check partition table size */
pt_size = (rt_uint64_t)rt_le32_to_cpu((*gpt)->num_partition_entries) *
rt_le32_to_cpu((*gpt)->sizeof_partition_entry);
if (!(*ptes = alloc_read_gpt_entries(disk, *gpt)))
{
goto _fail;
}
/* Check the GUID Partition Entry Array CRC */
crc = efi_crc32((const rt_uint8_t *)(*ptes), pt_size);
if (crc != rt_le32_to_cpu((*gpt)->partition_entry_array_crc32))
{
LOG_D("%s: GUID Partition Entry Array CRC check failed", to_disk_name(disk));
goto _fail_ptes;
}
/* We're done, all's well */
return RT_TRUE;
_fail_ptes:
rt_free(*ptes);
*ptes = RT_NULL;
_fail:
rt_free(*gpt);
*gpt = RT_NULL;
return RT_FALSE;
}
/**
* @brief This function tests one PTE for validity.
*
* @param pte the pte to check.
* @param lastlba the last lba of the disk.
* @return valid boolean of pte.
*/
rt_inline rt_bool_t is_pte_valid(const gpt_entry *pte, const rt_size_t lastlba)
{
if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
rt_le64_to_cpu(pte->starting_lba) > lastlba ||
rt_le64_to_cpu(pte->ending_lba) > lastlba)
{
return RT_FALSE;
}
return RT_TRUE;
}
/**
* @brief This function compares the primary and alternate GPT headers and warns about mismatches.
*
* @param disk the blk of disk.
* @param pgpt the primary GPT header.
* @param agpt the alternate GPT header.
* @param lastlba the last LBA number.
*/
static void compare_gpts(struct rt_blk_disk *disk,
gpt_header *pgpt, gpt_header *agpt, rt_uint64_t lastlba)
{
int error_found = 0;
if (!pgpt || !agpt)
{
return;
}
if (rt_le64_to_cpu(pgpt->start_lba) != rt_le64_to_cpu(agpt->alternate_lba))
{
LOG_W("%s: GPT:Primary header LBA(%lld) != Alt(%lld), header alternate_lba",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->start_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->alternate_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->alternate_lba) != rt_le64_to_cpu(agpt->start_lba))
{
LOG_W("%s: GPT:Primary header alternate_lba(%lld) != Alt(%lld), header start_lba",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->alternate_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->start_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->first_usable_lba) != rt_le64_to_cpu(agpt->first_usable_lba))
{
LOG_W("%s: GPT:first_usable_lbas don't match %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->first_usable_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->first_usable_lba));
++error_found;
}
if (rt_le64_to_cpu(pgpt->last_usable_lba) != rt_le64_to_cpu(agpt->last_usable_lba))
{
LOG_W("%s: GPT:last_usable_lbas don't match %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->last_usable_lba),
(rt_uint64_t)rt_le64_to_cpu(agpt->last_usable_lba));
++error_found;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid))
{
LOG_W("%s: GPT:disk_guids don't match", to_disk_name(disk));
++error_found;
}
if (rt_le32_to_cpu(pgpt->num_partition_entries) !=
rt_le32_to_cpu(agpt->num_partition_entries))
{
LOG_W("%s: GPT:num_partition_entries don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->num_partition_entries),
rt_le32_to_cpu(agpt->num_partition_entries));
++error_found;
}
if (rt_le32_to_cpu(pgpt->sizeof_partition_entry) !=
rt_le32_to_cpu(agpt->sizeof_partition_entry))
{
LOG_W("%s: GPT:sizeof_partition_entry values don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->sizeof_partition_entry),
rt_le32_to_cpu(agpt->sizeof_partition_entry));
++error_found;
}
if (rt_le32_to_cpu(pgpt->partition_entry_array_crc32) !=
rt_le32_to_cpu(agpt->partition_entry_array_crc32))
{
LOG_W("%s: GPT:partition_entry_array_crc32 values don't match: 0x%x != 0x%x",
to_disk_name(disk),
rt_le32_to_cpu(pgpt->partition_entry_array_crc32),
rt_le32_to_cpu(agpt->partition_entry_array_crc32));
++error_found;
}
if (rt_le64_to_cpu(pgpt->alternate_lba) != lastlba)
{
LOG_W("%s: GPT:Primary header thinks Alt. header is not at the end of the disk: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(pgpt->alternate_lba),
(rt_uint64_t)lastlba);
++error_found;
}
if (rt_le64_to_cpu(agpt->start_lba) != lastlba)
{
LOG_W("%s: GPT:Alternate GPT header not at the end of the disk: %lld != %lld",
to_disk_name(disk),
(rt_uint64_t)rt_le64_to_cpu(agpt->start_lba),
(rt_uint64_t)lastlba);
++error_found;
}
if (error_found)
{
LOG_W("GPT: Use GNU Parted to correct GPT errors");
}
}
/**
* @brief This function searches the disk for valid GPT headers and PTEs.
*
* @param disk the disk parsed partitions.
* @param gpt the GPT header ptr, filled on return.
* @param ptes the PTEs ptr, filled on return.
* @return 1 if valid, 0 on error.
* If valid, returns pointers to newly allocated GPT header and PTEs.
* Validity depends on PMBR being valid (or being overridden by the
* 'gpt' kernel command line option) and finding either the Primary
* GPT header and PTEs valid, or the Alternate GPT header and PTEs
* valid. If the Primary GPT header is not valid, the Alternate GPT header
* is not checked unless the 'gpt' kernel command line option is passed.
* This protects against devices which misreport their size, and forces
* the user to decide to use the Alternate GPT.
*/
static rt_bool_t find_valid_gpt(struct rt_blk_disk *disk,
gpt_header **gpt, gpt_entry **ptes)
{
int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
gpt_header *pgpt = RT_NULL, *agpt = RT_NULL;
gpt_entry *pptes = RT_NULL, *aptes = RT_NULL;
legacy_mbr *legacymbr;
rt_size_t total_sectors = rt_blk_disk_get_capacity(disk);
rt_size_t lastlba;
if (!ptes)
{
return RT_FALSE;
}
lastlba = last_lba(disk);
if (!force_gpt)
{
/* This will be added to the EFI Spec. per Intel after v1.02. */
legacymbr = rt_malloc(sizeof(*legacymbr));
if (!legacymbr)
{
return RT_FALSE;
}
read_lba(disk, 0, (rt_uint8_t *)legacymbr, sizeof(*legacymbr));
good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
rt_free(legacymbr);
if (!good_pmbr)
{
return RT_FALSE;
}
LOG_D("%s: Device has a %s MBR", to_disk_name(disk),
good_pmbr == GPT_MBR_PROTECTIVE ? "protective" : "hybrid");
}
good_pgpt = is_gpt_valid(disk, GPT_PRIMARY_PARTITION_TABLE_LBA, &pgpt, &pptes);
if (good_pgpt)
{
good_agpt = is_gpt_valid(disk, rt_le64_to_cpu(pgpt->alternate_lba), &agpt, &aptes);
}
if (!good_agpt && force_gpt)
{
good_agpt = is_gpt_valid(disk, lastlba, &agpt, &aptes);
}
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
{
goto _fail;
}
compare_gpts(disk, pgpt, agpt, lastlba);
/* The good cases */
if (good_pgpt)
{
*gpt = pgpt;
*ptes = pptes;
rt_free(agpt);
rt_free(aptes);
if (!good_agpt)
{
LOG_D("%s: Alternate GPT is invalid, using primary GPT", to_disk_name(disk));
}
return RT_TRUE;
}
else if (good_agpt)
{
*gpt = agpt;
*ptes = aptes;
rt_free(pgpt);
rt_free(pptes);
LOG_D("%s: Primary GPT is invalid, using alternate GPT", to_disk_name(disk));
return RT_TRUE;
}
_fail:
rt_free(pgpt);
rt_free(agpt);
rt_free(pptes);
rt_free(aptes);
*gpt = RT_NULL;
*ptes = RT_NULL;
return RT_FALSE;
}
rt_err_t efi_partition(struct rt_blk_disk *disk)
{
rt_uint32_t entries_nr;
gpt_header *gpt = RT_NULL;
gpt_entry *ptes = RT_NULL;
if (!find_valid_gpt(disk, &gpt, &ptes) || !gpt || !ptes)
{
rt_free(gpt);
rt_free(ptes);
return -RT_EINVAL;
}
entries_nr = rt_le32_to_cpu(gpt->num_partition_entries);
for (int i = 0; i < entries_nr && i < disk->max_partitions; ++i)
{
rt_uint64_t start = rt_le64_to_cpu(ptes[i].starting_lba);
rt_uint64_t size = rt_le64_to_cpu(ptes[i].ending_lba) -
rt_le64_to_cpu(ptes[i].starting_lba) + 1ULL;
if (!is_pte_valid(&ptes[i], last_lba(disk)))
{
continue;
}
if (blk_put_partition(disk, "gpt", start, size, i) == -RT_ENOMEM)
{
break;
}
}
rt_free(gpt);
rt_free(ptes);
return RT_EOK;
}

View File

@ -0,0 +1,141 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-05-05 linzhenxing first version
* 2023-02-25 GuEe-GUI make blk interface
*/
#ifndef __PARTITIONS_EFI_H__
#define __PARTITIONS_EFI_H__
#include "../blk_partition.h"
#include <drivers/misc.h>
#include <drivers/byteorder.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xef
#define EFI_PMBR_OSTYPE_EFI_GPT 0xee
#define GPT_MBR_PROTECTIVE 1
#define GPT_MBR_HYBRID 2
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
#ifndef __UUID_H__
#define UUID_SIZE 16
typedef struct
{
rt_uint8_t b[UUID_SIZE];
} guid_t;
#endif /* __UUID_H__ */
#ifndef __EFI_H__
typedef guid_t efi_guid_t rt_align(4);
#define EFI_GUID(a, b, c, d...) (efi_guid_t) \
{{ \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, \
d \
}}
#define NULL_GUID \
EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
rt_inline int efi_guidcmp(efi_guid_t left, efi_guid_t right)
{
return rt_memcmp(&left, &right, sizeof (efi_guid_t));
}
#endif /* __EFI_H__ */
#define PARTITION_SYSTEM_GUID \
EFI_GUID(0xc12a7328, 0xf81f, 0x11d2, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b)
#define LEGACY_MBR_PARTITION_GUID \
EFI_GUID(0x024dee41, 0x33e7, 0x11d3, 0x9d, 0x69, 0x00, 0x08, 0xc7, 0x81, 0xf3, 0x9f)
#define PARTITION_MSFT_RESERVED_GUID \
EFI_GUID(0xe3c9e316, 0x0b5c, 0x4db8, 0x81, 0x7d, 0xf9, 0x2d, 0xf0, 0x02, 0x15, 0xae)
#define PARTITION_BASIC_DATA_GUID \
EFI_GUID(0xebd0a0a2, 0xb9e5, 0x4433, 0x87, 0xc0, 0x68, 0xb6, 0xb7, 0x26, 0x99, 0xc7)
rt_packed(struct _gpt_header
{
rt_le64_t signature;
rt_le32_t revision;
rt_le32_t header_size;
rt_le32_t header_crc32;
rt_le32_t reserved1;
rt_le64_t start_lba;
rt_le64_t alternate_lba;
rt_le64_t first_usable_lba;
rt_le64_t last_usable_lba;
efi_guid_t disk_guid;
rt_le64_t partition_entry_lba;
rt_le32_t num_partition_entries;
rt_le32_t sizeof_partition_entry;
rt_le32_t partition_entry_array_crc32;
/*
* The rest of the logical block is reserved by UEFI and must be zero.
* EFI standard handles this by:
*
* uint8_t reserved2[BlockSize - 92];
*/
});
typedef struct _gpt_header gpt_header;
rt_packed(struct _gpt_entry_attributes
{
rt_uint64_t required_to_function:1;
rt_uint64_t reserved:47;
rt_uint64_t type_guid_specific:16;
});
typedef struct _gpt_entry_attributes gpt_entry_attributes;
rt_packed(struct _gpt_entry
{
efi_guid_t partition_type_guid;
efi_guid_t unique_partition_guid;
rt_le64_t starting_lba;
rt_le64_t ending_lba;
gpt_entry_attributes attributes;
rt_le16_t partition_name[72/sizeof(rt_le16_t)];
});
typedef struct _gpt_entry gpt_entry;
rt_packed(struct _gpt_mbr_record
{
rt_uint8_t boot_indicator; /* unused by EFI, set to 0x80 for bootable */
rt_uint8_t start_head; /* unused by EFI, pt start in CHS */
rt_uint8_t start_sector; /* unused by EFI, pt start in CHS */
rt_uint8_t start_track;
rt_uint8_t os_type; /* EFI and legacy non-EFI OS types */
rt_uint8_t end_head; /* unused by EFI, pt end in CHS */
rt_uint8_t end_sector; /* unused by EFI, pt end in CHS */
rt_uint8_t end_track; /* unused by EFI, pt end in CHS */
rt_le32_t starting_lba; /* used by EFI - start addr of the on disk pt */
rt_le32_t size_in_lba; /* used by EFI - size of pt in LBA */
});
typedef struct _gpt_mbr_record gpt_mbr_record;
rt_packed(struct _legacy_mbr
{
rt_uint8_t boot_code[440];
rt_le32_t unique_mbr_signature;
rt_le16_t unknown;
gpt_mbr_record partition_record[4];
rt_le16_t signature;
});
typedef struct _legacy_mbr legacy_mbr;
#endif /* __PARTITIONS_EFI_H__ */

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-25 GuEe-GUI first version
*/
#ifndef __BLK_H__
#define __BLK_H__
#include <rthw.h>
#include <rtthread.h>
#include <drivers/classes/block.h>
struct rt_dm_ida;
struct rt_blk_device;
struct rt_blk_disk_ops;
struct rt_blk_disk
{
struct rt_device parent;
const struct rt_blk_disk_ops *ops;
#ifdef RT_USING_DM
struct rt_dm_ida *ida;
#endif
rt_uint32_t read_only:1;
rt_uint32_t parallel_io:1;
rt_uint32_t removable:1;
#define RT_BLK_DISK_MAGIC 0xbdaabdaa
rt_uint32_t __magic;
rt_uint32_t partitions;
#define RT_BLK_PARTITION_NONE (-1)
#define RT_BLK_PARTITION_MAX (RT_UINT32_MAX >> 1)
rt_int32_t max_partitions;
rt_list_t part_nodes;
struct rt_spinlock lock;
struct rt_semaphore usr_lock;
};
struct rt_blk_disk_ops
{
rt_ssize_t (*read)(struct rt_blk_disk *disk, rt_off_t sector, void *buffer,
rt_size_t sector_count);
rt_ssize_t (*write)(struct rt_blk_disk *disk, rt_off_t sector, const void *buffer,
rt_size_t sector_count);
rt_err_t (*getgeome)(struct rt_blk_disk *disk, struct rt_device_blk_geometry *geometry);
rt_err_t (*sync)(struct rt_blk_disk *disk);
rt_err_t (*erase)(struct rt_blk_disk *disk);
rt_err_t (*autorefresh)(struct rt_blk_disk *disk, rt_bool_t is_auto);
rt_err_t (*control)(struct rt_blk_disk *disk, struct rt_blk_device *blk, int cmd, void *args);
};
#ifndef __DFS_H__
#include <dfs_fs.h>
struct rt_blk_device
{
struct rt_device parent;
int partno;
struct dfs_partition partition;
rt_list_t list;
struct rt_blk_disk *disk;
rt_size_t sector_start;
rt_size_t sector_count;
};
#else
struct rt_blk_device;
#endif /* __DFS_H__ */
rt_err_t rt_hw_blk_disk_register(struct rt_blk_disk *disk);
rt_err_t rt_hw_blk_disk_unregister(struct rt_blk_disk *disk);
rt_err_t rt_blk_disk_probe_partition(struct rt_blk_disk *disk);
rt_ssize_t rt_blk_disk_get_capacity(struct rt_blk_disk *disk);
rt_ssize_t rt_blk_disk_get_logical_block_size(struct rt_blk_disk *disk);
#endif /* __BLK_H__ */
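
For in-kernel consumers, a short sketch of reading one sector through the plain rt_device interface once a disk or partition registered by this layer exists; the device name passed in is whatever the disk driver chose (for example the hypothetical "ramd0" from the sketch near the top). As blk_read()/blk_dev_read() above show, the position and size arguments of rt_device_read() are in sectors for block devices.

#include <rtthread.h>
#include <drivers/classes/block.h>

/* Read sector 0 of a registered block device by name */
static rt_err_t read_first_sector(const char *name)
{
    struct rt_device_blk_geometry geom;
    rt_uint8_t *buf;
    rt_device_t dev = rt_device_find(name);

    if (!dev || rt_device_open(dev, RT_DEVICE_OFLAG_RDONLY) != RT_EOK)
    {
        return -RT_ERROR;
    }

    /* Ask the disk (or partition) for its geometry, then read one sector */
    rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geom);

    buf = rt_malloc(geom.bytes_per_sector);
    if (buf)
    {
        rt_device_read(dev, 0, buf, 1);
        rt_free(buf);
    }

    rt_device_close(dev);
    return RT_EOK;
}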

View File

@ -45,6 +45,10 @@ extern "C" {
#include "drivers/core/power_domain.h"
#include "drivers/platform.h"
#ifdef RT_USING_BLK
#include "drivers/blk.h"
#endif
#ifdef RT_USING_OFW
#include "drivers/ofw.h"
#include "drivers/ofw_fdt.h"