2024-08-05 20:57:09 +08:00
commit 46d9ee7795
3020 changed files with 1725767 additions and 0 deletions

View File

@@ -0,0 +1,10 @@
This folder contains:
| sub-folders | description |
| ----------- | ------------------------- |
| aio | Asynchronous I/O |
| mman | Memory-Mapped I/O |
| poll | I/O multiplexing (poll/select) |
| stdio | Standard input/output |
| termios | Terminal I/O |

View File

@@ -0,0 +1,15 @@
# RT-Thread building script for component
import os
from building import *
cwd = GetCurrentDir()
group = []
list = os.listdir(cwd)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
group = group + SConscript(os.path.join(d, 'SConscript'))
Return('group')

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = ['aio.c']
CPPPATH = [cwd]
group = DefineGroup('POSIX', src, depend = ['RT_USING_POSIX_AIO'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,506 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/12/30 Bernard The first version.
* 2024/03/26 TroyMitchelle Added some function comments
* 2024/03/27 TroyMitchelle Fixed the incorrect return value for invalid parameters in aio_write
*/
#include <rtthread.h>
#include <rthw.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/errno.h>
#include "aio.h"
struct rt_workqueue* aio_queue = NULL;
/**
* The aio_cancel() function shall attempt to cancel one or more asynchronous I/O
* requests currently outstanding against file descriptor fildes. The aiocbp
* argument points to the asynchronous I/O control block for a particular request
* to be canceled. If aiocbp is NULL, then all outstanding cancelable asynchronous
* I/O requests against fildes shall be canceled.
*
* Normal asynchronous notification shall occur for asynchronous I/O operations
* that are successfully canceled. If there are requests that cannot be canceled,
* then the normal asynchronous completion process shall take place for those
* requests when they are completed.
*
* For requested operations that are successfully canceled, the associated error
* status shall be set to [ECANCELED] and the return status shall be -1. For
* requested operations that are not successfully canceled, the aiocbp shall not
* be modified by aio_cancel().
*
* If aiocbp is not NULL, then if fildes does not have the same value as the file
* descriptor with which the asynchronous operation was initiated, unspecified results occur.
*
* Which operations are cancelable is implementation-defined.
*/
int aio_cancel(int fd, struct aiocb *cb)
{
rt_err_t ret;
if (!cb) return -EINVAL;
if (cb->aio_fildes != fd) return -EINVAL;
ret = rt_workqueue_cancel_work_sync(aio_queue, &(cb->aio_work));
if (ret == RT_EOK)
{
errno = -ECANCELED;
return -1;
}
return 0;
}
/**
* The aio_error() function shall return the error status associated with the
* aiocb structure referenced by the aiocbp argument. The error status for an
* asynchronous I/O operation is the errno value that would be set by the
* corresponding read(), write(), or fsync() operation. If the operation has
* not yet completed, the error status shall be equal to [EINPROGRESS].
*/
int aio_error(const struct aiocb *cb)
{
if (cb)
{
return cb->aio_result;
}
return -EINVAL;
}
/**
* The aio_fsync() function shall asynchronously perform a file synchronization
* operation, as specified by the op argument, for I/O operations associated with
* the file indicated by the file descriptor aio_fildes member of the aiocb
* structure referenced by the aiocbp argument and queued at the time of the
* call to aio_fsync(). The function call shall return when the synchronization
* request has been initiated or queued to the file or device (even when the data
* cannot be synchronized immediately).
*
* option: If op is O_DSYNC, all currently queued I/O operations shall be completed
* as if by a call to fdatasync(); that is, as defined for synchronized I/O data
* integrity completion.
*
* option: If op is O_SYNC, all currently queued I/O operations shall be completed
* as if by a call to fsync(); that is, as defined for synchronized I/O file integrity
* completion. If the aio_fsync() function fails, or if the operation queued by
* aio_fsync() fails, then outstanding I/O operations are not guaranteed to have
* been completed.
*
* If aio_fsync() succeeds, then it is only the I/O that was queued at the time
* of the call to aio_fsync() that is guaranteed to be forced to the relevant
* completion state. The completion of subsequent I/O on the file descriptor is
* not guaranteed to be completed in a synchronized fashion.
*
* The aiocbp argument refers to an asynchronous I/O control block. The aiocbp
* value may be used as an argument to aio_error() and aio_return() in order to
* determine the error status and return status, respectively, of the asynchronous
* operation while it is proceeding. When the request is queued, the error status
* for the operation is [EINPROGRESS]. When all data has been successfully transferred,
* the error status shall be reset to reflect the success or failure of the operation.
* If the operation does not complete successfully, the error status for the
* operation shall be set to indicate the error. The aio_sigevent member determines
* the asynchronous notification to occur as specified in Signal Generation and
* Delivery when all operations have achieved synchronized I/O completion. All
* other members of the structure referenced by aiocbp are ignored. If the control
* block referenced by aiocbp becomes an illegal address prior to asynchronous
* I/O completion, then the behavior is undefined.
*
* If the aio_fsync() function fails or aiocbp indicates an error condition,
* data is not guaranteed to have been successfully transferred.
*/
static void aio_fsync_work(struct rt_work* work, void* work_data)
{
int result;
rt_base_t level;
struct aiocb *cb = (struct aiocb*)work_data;
RT_ASSERT(cb != RT_NULL);
result = fsync(cb->aio_fildes);
/* modify result */
level = rt_hw_interrupt_disable();
if (result < 0)
cb->aio_result = errno;
else
cb->aio_result = 0;
rt_hw_interrupt_enable(level);
return ;
}
/**
* @brief Initiates an asynchronous fsync operation.
*
* This function initiates an asynchronous fsync operation on the file associated
* with the specified aiocb structure. The operation is queued to the workqueue
* for execution.
*
* @param op The operation to be performed. This parameter is ignored.
* @param cb Pointer to the aiocb structure representing the asynchronous fsync operation.
*
* @return Returns 0 on success.
*/
int aio_fsync(int op, struct aiocb *cb)
{
rt_base_t level;
if (!cb) return -EINVAL;
level = rt_hw_interrupt_disable();
cb->aio_result = -EINPROGRESS;
rt_hw_interrupt_enable(level);
rt_work_init(&(cb->aio_work), aio_fsync_work, cb);
rt_workqueue_dowork(aio_queue, &(cb->aio_work));
return 0;
}
/**
* @brief Worker function for asynchronous read operation.
*
* This function performs the actual reading of data from the file associated with
* the specified aiocb structure. It sets the result of the operation in the
* aio_result field of the aiocb structure.
*
* @param work Pointer to the work item.
* @param work_data Pointer to the aiocb structure representing the asynchronous read operation.
*/
static void aio_read_work(struct rt_work* work, void* work_data)
{
int len;
rt_base_t level;
uint8_t *buf_ptr;
struct aiocb *cb = (struct aiocb*)work_data;
buf_ptr = (uint8_t*)cb->aio_buf;
/* seek to offset */
lseek(cb->aio_fildes, cb->aio_offset, SEEK_SET);
len = read(cb->aio_fildes, buf_ptr, cb->aio_nbytes);
/* modify result */
level = rt_hw_interrupt_disable();
if (len <= 0)
cb->aio_result = errno;
else
cb->aio_result = len;
rt_hw_interrupt_enable(level);
return ;
}
/**
* The aio_read() function shall read aiocbp->aio_nbytes from the file associated
* with aiocbp->aio_fildes into the buffer pointed to by aiocbp->aio_buf. The
* function call shall return when the read request has been initiated or queued
* to the file or device (even when the data cannot be delivered immediately).
*
* If prioritized I/O is supported for this file, then the asynchronous operation
* shall be submitted at a priority equal to a base scheduling priority minus
* aiocbp->aio_reqprio. If Thread Execution Scheduling is not supported, then
* the base scheduling priority is that of the calling process;
*
* otherwise, the base scheduling priority is that of the calling thread.
*
* The aiocbp value may be used as an argument to aio_error() and aio_return()
* in order to determine the error status and return status, respectively, of
* the asynchronous operation while it is proceeding. If an error condition is
* encountered during queuing, the function call shall return without having
* initiated or queued the request. The requested operation takes place at the
* absolute position in the file as given by aio_offset, as if lseek() were called
* immediately prior to the operation with an offset equal to aio_offset and a
* whence equal to SEEK_SET. After a successful call to enqueue an asynchronous
* I/O operation, the value of the file offset for the file is unspecified.
*
* The aio_sigevent member specifies the notification which occurs when the
* request is completed.
*
* The aiocbp->aio_lio_opcode field shall be ignored by aio_read().
*
* The aiocbp argument points to an aiocb structure. If the buffer pointed to by
* aiocbp->aio_buf or the control block pointed to by aiocbp becomes an illegal
* address prior to asynchronous I/O completion, then the behavior is undefined.
*
* Simultaneous asynchronous operations using the same aiocbp produce undefined
* results.
*
* If synchronized I/O is enabled on the file associated with aiocbp->aio_fildes,
* the behavior of this function shall be according to the definitions of synchronized
* I/O data integrity completion and synchronized I/O file integrity completion.
*
* For any system action that changes the process memory space while an asynchronous
* I/O is outstanding to the address range being changed, the result of that action
* is undefined.
*
* For regular files, no data transfer shall occur past the offset maximum
* established in the open file description associated with aiocbp->aio_fildes.
*
*/
int aio_read(struct aiocb *cb)
{
rt_base_t level;
if (!cb) return -EINVAL;
if (cb->aio_offset < 0) return -EINVAL;
level = rt_hw_interrupt_disable();
cb->aio_result = -EINPROGRESS;
rt_hw_interrupt_enable(level);
/* en-queue read work */
rt_work_init(&(cb->aio_work), aio_read_work, cb);
rt_workqueue_dowork(aio_queue, &(cb->aio_work));
return 0;
}
/**
* The aio_return() function shall return the return status associated with the
* aiocb structure referenced by the aiocbp argument. The return status for an
* asynchronous I/O operation is the value that would be returned by the corresponding
* read(), write(), or fsync() function call. If the error status for the operation
* is equal to [EINPROGRESS], then the return status for the operation is undefined.
* The aio_return() function may be called exactly once to retrieve the return
* status of a given asynchronous operation; thereafter, if the same aiocb structure
* is used in a call to aio_return() or aio_error(), an error may be returned.
* When the aiocb structure referred to by aiocbp is used to submit another asynchronous
* operation, then aio_return() may be successfully used to retrieve the return
* status of that operation.
*/
ssize_t aio_return(struct aiocb *cb)
{
if (cb)
{
if (cb->aio_result < 0)
rt_set_errno(cb->aio_result);
return cb->aio_result;
}
return -EINVAL;
}
/**
* The aio_suspend() function shall suspend the calling thread until at least
* one of the asynchronous I/O operations referenced by the list argument has
* completed, until a signal interrupts the function, or, if timeout is not NULL,
* until the time interval specified by timeout has passed. If any of the aiocb
* structures in the list correspond to completed asynchronous I/O operations
* (that is, the error status for the operation is not equal to [EINPROGRESS])
* at the time of the call, the function shall return without suspending the
* calling thread. The list argument is an array of pointers to asynchronous I/O
* control blocks. The nent argument indicates the number of elements in the
* array. Each aiocb structure pointed to has been used in initiating an asynchronous
* I/O request via aio_read(), aio_write(), or lio_listio(). This array may
* contain null pointers, which are ignored. If this array contains pointers
* that refer to aiocb structures that have not been used in submitting asynchronous
* I/O, the effect is undefined.
*
* If the time interval indicated in the timespec structure pointed to by timeout
* passes before any of the I/O operations referenced by list are completed, then
* aio_suspend() shall return with an error.
*/
int aio_suspend(const struct aiocb *const list[], int nent,
const struct timespec *timeout)
{
return -ENOSYS;
}
/**
* @brief Worker function for asynchronous write operation.
*
* This function performs the actual writing of data to the file associated with
* the specified aiocb structure. It sets the result of the operation in the
* aio_result field of the aiocb structure.
*
* @param work Pointer to the work item.
* @param work_data Pointer to the aiocb structure representing the asynchronous write operation.
*/
static void aio_write_work(struct rt_work* work, void* work_data)
{
rt_base_t level;
int len, oflags;
uint8_t *buf_ptr;
struct aiocb *cb = (struct aiocb*)work_data;
buf_ptr = (uint8_t*)cb->aio_buf;
/* unless O_APPEND is set, seek to the absolute position given by aio_offset */
oflags = fcntl(cb->aio_fildes, F_GETFL, 0);
if ((oflags & O_APPEND) == 0)
{
lseek(cb->aio_fildes, cb->aio_offset, SEEK_SET);
}
/* write data */
len = write(cb->aio_fildes, buf_ptr, cb->aio_nbytes);
/* modify result */
level = rt_hw_interrupt_disable();
if (len <= 0)
cb->aio_result = errno;
else
cb->aio_result = len;
rt_hw_interrupt_enable(level);
return;
}
/**
* The aio_write() function shall write aiocbp->aio_nbytes to the file associated
* with aiocbp->aio_fildes from the buffer pointed to by aiocbp->aio_buf. The
* function shall return when the write request has been initiated or, at a minimum,
* queued to the file or device.
*
* The aiocbp argument may be used as an argument to aio_error() and aio_return()
* in order to determine the error status and return status, respectively, of the
* asynchronous operation while it is proceeding.
*
* The aiocbp argument points to an aiocb structure. If the buffer pointed to by
* aiocbp->aio_buf or the control block pointed to by aiocbp becomes an illegal
* address prior to asynchronous I/O completion, then the behavior is undefined.
*
* If O_APPEND is not set for the file descriptor aio_fildes, then the requested
* operation shall take place at the absolute position in the file as given by
* aio_offset, as if lseek() were called immediately prior to the operation with
* an offset equal to aio_offset and a whence equal to SEEK_SET. If O_APPEND is
* set for the file descriptor, or if aio_fildes is associated with a device that
* is incapable of seeking, write operations append to the file in the same order
* as the calls were made, except under circumstances described in Asynchronous
* I/O. After a successful call to enqueue an asynchronous I/O operation, the value
* of the file offset for the file is unspecified.
*
* The aio_sigevent member specifies the notification which occurs when the request
* is completed.
*
* The aiocbp->aio_lio_opcode field shall be ignored by aio_write().
*
* Simultaneous asynchronous operations using the same aiocbp produce undefined
* results.
*
* If synchronized I/O is enabled on the file associated with aiocbp->aio_fildes,
* the behavior of this function shall be according to the definitions of synchronized
* I/O data integrity completion, and synchronized I/O file integrity completion.
*
* For regular files, no data transfer shall occur past the offset maximum established
* in the open file description associated with aiocbp->aio_fildes.
*/
int aio_write(struct aiocb *cb)
{
int oflags;
rt_base_t level;
if (!cb || (cb->aio_buf == NULL)) return -EINVAL;
/* check access mode */
oflags = fcntl(cb->aio_fildes, F_GETFL, 0);
/* the descriptor must be open for writing (O_WRONLY or O_RDWR); otherwise return -EINVAL */
if ((oflags & O_ACCMODE) != O_WRONLY &&
(oflags & O_ACCMODE) != O_RDWR)
return -EINVAL;
level = rt_hw_interrupt_disable();
cb->aio_result = -EINPROGRESS;
rt_hw_interrupt_enable(level);
rt_work_init(&(cb->aio_work), aio_write_work, cb);
rt_workqueue_dowork(aio_queue, &(cb->aio_work));
return 0;
}
/**
* The lio_listio() function shall initiate a list of I/O requests with a single
* function call.
*
* The mode argument takes one of the values LIO_WAIT or LIO_NOWAIT declared in
* <aio.h> and determines whether the function returns when the I/O operations
* have been completed, or as soon as the operations have been queued. If the
* mode argument is LIO_WAIT, the function shall wait until all I/O is complete
* and the sig argument shall be ignored.
*
* If the mode argument is LIO_NOWAIT, the function shall return immediately, and
* asynchronous notification shall occur, according to the sig argument, when all
* the I/O operations complete. If sig is NULL, then no asynchronous notification
* shall occur. If sig is not NULL, asynchronous notification occurs as specified
* in Signal Generation and Delivery when all the requests in list have completed.
*
* The I/O requests enumerated by list are submitted in an unspecified order.
*
* The list argument is an array of pointers to aiocb structures. The array contains
* nent elements. The array may contain NULL elements, which shall be ignored.
*
* If the buffer pointed to by list or the aiocb structures pointed to by the
* elements of the array list become illegal addresses before all asynchronous I/O
* completed and, if necessary, the notification is sent, then the behavior is
* undefined. If the buffers pointed to by the aio_buf member of the aiocb structure
* pointed to by the elements of the array list become illegal addresses prior to
* the asynchronous I/O associated with that aiocb structure being completed, the
* behavior is undefined.
*
* The aio_lio_opcode field of each aiocb structure specifies the operation to be
* performed. The supported operations are LIO_READ, LIO_WRITE, and LIO_NOP; these
* symbols are defined in <aio.h>. The LIO_NOP operation causes the list entry to
* be ignored. If the aio_lio_opcode element is equal to LIO_READ, then an I/O operation
* is submitted as if by a call to aio_read() with the aiocbp equal to the address
* of the aiocb structure. If the aio_lio_opcode element is equal to LIO_WRITE, then
* an I/O operation is submitted as if by a call to aio_write() with the aiocbp equal
* to the address of the aiocb structure.
*
* The aio_fildes member specifies the file descriptor on which the operation is to
* be performed.
*
* The aio_buf member specifies the address of the buffer to or from which the data
* is transferred.
*
* The aio_nbytes member specifies the number of bytes of data to be transferred.
*
* The members of the aiocb structure further describe the I/O operation to be
* performed, in a manner identical to that of the corresponding aiocb structure
* when used by the aio_read() and aio_write() functions.
*
* The nent argument specifies how many elements are members of the list; that is,
* the length of the array.
*
* The behavior of this function is altered according to the definitions of synchronized
* I/O data integrity completion and synchronized I/O file integrity completion if
* synchronized I/O is enabled on the file associated with aio_fildes.
*
* For regular files, no data transfer shall occur past the offset maximum established
* in the open file description associated with aiocbp->aio_fildes.
*
* If sig->sigev_notify is SIGEV_THREAD and sig->sigev_notify_attributes is a
* non-null pointer and the block pointed to by this pointer becomes an illegal
* address prior to all asynchronous I/O being completed, then the behavior is
* undefined.
*/
int lio_listio(int mode, struct aiocb * const list[], int nent,
struct sigevent *sig)
{
return -ENOSYS;
}
/**
* @brief Initializes the asynchronous I/O system.
*
* This function initializes the asynchronous I/O system by creating a workqueue
* for asynchronous I/O operations.
*
* @return Returns 0 on success.
*/
int aio_system_init(void)
{
aio_queue = rt_workqueue_create("aio", 2048, RT_THREAD_PRIORITY_MAX/2);
RT_ASSERT(aio_queue != NULL);
return 0;
}
INIT_COMPONENT_EXPORT(aio_system_init);
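
The flow above (queue a work item, store the outcome in aio_result, report it through aio_error()/aio_return()) is easiest to see from the calling side. A minimal, illustrative sketch follows; the path "/data.txt", the buffer size, the helper name and the 10 ms polling delay are placeholders, not part of the commit.

#include <rtthread.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include "aio.h"

static char buf[64];

static void aio_read_sample(void)
{
    struct aiocb cb;
    int fd = open("/data.txt", O_RDONLY);
    if (fd < 0)
        return;

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;
    cb.aio_buf    = buf;
    cb.aio_nbytes = sizeof(buf);
    cb.aio_offset = 0;

    if (aio_read(&cb) == 0)
    {
        /* aio_result stays -EINPROGRESS until the workqueue item has run */
        while (aio_error(&cb) == -EINPROGRESS)
            rt_thread_mdelay(10);

        ssize_t len = aio_return(&cb); /* bytes read, or an errno value on failure */
        (void)len;
    }
    close(fd);
}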

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/12/30 Bernard The first version.
* 2024/03/26 TroyMitchelle Align comments within the aiocb structure
*/
#ifndef __AIO_H__
#define __AIO_H__
#include <stdio.h>
#include <sys/signal.h>
#include <rtdevice.h>
struct aiocb
{
int aio_fildes; /* File descriptor. */
off_t aio_offset; /* File offset. */
volatile void *aio_buf; /* Location of buffer. */
size_t aio_nbytes; /* Length of transfer. */
int aio_reqprio; /* Request priority offset. */
struct sigevent aio_sigevent; /* Signal number and value. */
int aio_lio_opcode; /* Operation to be performed. */
int aio_result;
struct rt_work aio_work;
};
int aio_cancel(int fd, struct aiocb *cb);
int aio_error (const struct aiocb *cb);
int aio_fsync(int op, struct aiocb *cb);
int aio_read(struct aiocb *cb);
ssize_t aio_return(struct aiocb *cb);
int aio_suspend(const struct aiocb *const list[], int nent,
const struct timespec *timeout);
int aio_write(struct aiocb *cb);
int lio_listio(int mode, struct aiocb * const list[], int nent,
struct sigevent *sig);
#endif
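
A write-side sketch using the declarations above (illustrative; the descriptor is assumed to be open O_WRONLY or O_RDWR, and the helper name is a placeholder): queue an aio_write(), wait for it to finish, then queue an aio_fsync(). Note that this implementation ignores the op argument of aio_fsync().

#include <rtthread.h>
#include <string.h>
#include <fcntl.h>
#include "aio.h"

static char msg[] = "hello";

static void aio_write_sample(int fd)
{
    struct aiocb cb;

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;              /* must be open for writing */
    cb.aio_buf    = msg;
    cb.aio_nbytes = sizeof(msg) - 1;
    cb.aio_offset = 0;               /* ignored when O_APPEND is set on fd */

    if (aio_write(&cb) == 0)
    {
        while (aio_error(&cb) == -EINPROGRESS)
            rt_thread_mdelay(10);
        (void)aio_return(&cb);       /* bytes written, or an errno value */
    }

    /* flush queued data; poll aio_error() again if completion matters */
    aio_fsync(O_SYNC, &cb);
}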

View File

@@ -0,0 +1,14 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend('RT_USING_POSIX_EPOLL'):
src += ['epoll.c']
group = DefineGroup('POSIX', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

File diff suppressed because it is too large

View File

@@ -0,0 +1,14 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend('RT_USING_POSIX_EVENTFD'):
src += ['eventfd.c']
group = DefineGroup('POSIX', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,345 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-20 zmq810150896 first version
* 2024-03-28 TroyMitchell Add comments for all functions
*/
#include <rtthread.h>
#include <fcntl.h>
#include <rtdevice.h>
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include <dfs.h>
#include "poll.h"
#include "eventfd.h"
#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC O_CLOEXEC
#define EFD_NONBLOCK O_NONBLOCK
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
#define EFD_ULLONG_MAX (~0ULL)
#define EVENTFD_MUTEX_NAME "eventfd"
struct eventfd_ctx
{
rt_wqueue_t reader_queue;
rt_wqueue_t writer_queue;
rt_uint64_t count;
unsigned int flags;
struct rt_mutex lock;
};
#ifndef RT_USING_DFS_V2
static int eventfd_close(struct dfs_file *file);
static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count);
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count);
#else
static int eventfd_close(struct dfs_file *file);
static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);
#endif
static const struct dfs_file_ops eventfd_fops =
{
.close = eventfd_close,
.poll = eventfd_poll,
.read = eventfd_read,
.write = eventfd_write,
};
/**
* @brief Closes an event file descriptor.
* @param file Pointer to the file descriptor structure.
* @return 0 on success, otherwise an error code.
*/
static int eventfd_close(struct dfs_file *file)
{
struct eventfd_ctx *ctx = file->vnode->data;
if (file->vnode->ref_count == 1)
{
rt_mutex_detach(&ctx->lock);
rt_free(ctx);
}
return 0;
}
/**
* @brief Polls an event file descriptor for events.
* @param file Pointer to the file descriptor structure.
* @param req Pointer to the poll request structure.
* @return Events that occurred on the file descriptor.
*/
static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req)
{
struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
int events = 0;
rt_uint64_t count;
count = ctx->count;
rt_poll_add(&ctx->reader_queue, req);
if (count > 0)
events |= POLLIN;
if (count == EFD_ULLONG_MAX)
events |= POLLERR;
if ((EFD_ULLONG_MAX - 1) > count)
events |= POLLOUT;
return events;
}
#ifndef RT_USING_DFS_V2
/**
* @brief Reads data from an event file descriptor.
* @param file Pointer to the file descriptor structure.
* @param buf Pointer to the buffer to read data into.
* @param count Maximum number of bytes to read.
* @return Number of bytes read on success, otherwise an error code.
*/
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count)
#else
/**
* @brief Reads data from an event file descriptor.
* @param file Pointer to the file descriptor structure.
* @param buf Pointer to the buffer to read data into.
* @param count Maximum number of bytes to read.
* @param pos Pointer to the file position (not used).
* @return Number of bytes read on success, otherwise an error code.
*/
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
#endif
{
struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
rt_uint64_t counter_num = 0;
rt_uint64_t *buffer;
if (count < sizeof(counter_num))
return -EINVAL;
buffer = (rt_uint64_t *)buf;
rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
if (ctx->count <= 0)
{
if (file->flags & O_NONBLOCK)
{
rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
rt_mutex_release(&ctx->lock);
return -EAGAIN;
}
else
{
/* blocking read with ctx->count == 0: release the mutex, wake any pending writer, then wait until data is written */
rt_mutex_release(&ctx->lock);
rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
rt_wqueue_wait(&ctx->reader_queue, 0, RT_WAITING_FOREVER);
rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
}
}
if (ctx->flags & EFD_SEMAPHORE)
{
counter_num = 1;
}
else
{
counter_num = ctx->count;
}
ctx->count -= counter_num;
(*buffer) = counter_num;
rt_mutex_release(&ctx->lock);
return sizeof(counter_num);
}
#ifndef RT_USING_DFS_V2
/**
* @brief Writes data to an event file descriptor.
* @param file Pointer to the file descriptor structure.
* @param buf Pointer to the buffer containing data to write.
* @param count Number of bytes to write.
* @return Number of bytes written on success, otherwise an error code.
*/
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count)
#else
/**
* @brief Writes data to an event file descriptor.
* @param file Pointer to the file descriptor structure.
* @param buf Pointer to the buffer containing data to write.
* @param count Number of bytes to write.
* @param pos Pointer to the file position (not used).
* @return Number of bytes written on success, otherwise an error code.
*/
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
#endif
{
struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
rt_ssize_t ret = 0;
rt_uint64_t counter_num;
if (count < sizeof(counter_num))
return -EINVAL;
counter_num = *(rt_uint64_t *)buf;
if (counter_num == EFD_ULLONG_MAX)
return -EINVAL;
ret = -EAGAIN;
rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
if ((EFD_ULLONG_MAX - ctx->count) > counter_num)
{
ret = sizeof(counter_num);
}
else if (!(file->flags & O_NONBLOCK))
{
for (;;)
{
if ((EFD_ULLONG_MAX - ctx->count) >= counter_num)
{
ret = sizeof(counter_num);
break;
}
/* Release the mutex to avoid a deadlock */
rt_mutex_release(&ctx->lock);
rt_wqueue_wait(&ctx->writer_queue, 0, RT_WAITING_FOREVER);
rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
}
}
if (ret > 0)
{
ctx->count += counter_num;
rt_wqueue_wakeup(&ctx->reader_queue, (void *)POLLIN);
}
rt_mutex_release(&ctx->lock);
return ret;
}
/**
* @brief Creates an event file descriptor.
* @param df Pointer to the file descriptor structure.
* @param count Initial value of the event counter.
* @param flags Flags for the event file descriptor.
* @return 0 on success, otherwise an error code.
*/
static int rt_eventfd_create(struct dfs_file *df, unsigned int count, int flags)
{
struct eventfd_ctx *ctx = RT_NULL;
rt_err_t ret = 0;
ctx = (struct eventfd_ctx *)rt_malloc(sizeof(struct eventfd_ctx));
if (ctx == RT_NULL)
{
ret = -ENOMEM;
}
else
{
ctx->count = count;
ctx->flags = flags;
flags &= EFD_SHARED_FCNTL_FLAGS;
flags |= O_RDWR;
rt_mutex_init(&ctx->lock, EVENTFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
rt_wqueue_init(&ctx->reader_queue);
rt_wqueue_init(&ctx->writer_queue);
df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
if (df->vnode)
{
dfs_vnode_init(df->vnode, FT_NONLOCK, &eventfd_fops);
df->vnode->data = ctx;
df->flags = flags;
}
else
{
rt_mutex_detach(&ctx->lock);
rt_free(ctx);
ret = -ENOMEM;
}
#ifdef RT_USING_DFS_V2
df->fops = &eventfd_fops;
#endif
}
return ret;
}
/**
* @brief Internal function to create an event file descriptor.
* @param count Initial value of the event counter.
* @param flags Flags for the event file descriptor.
* @return File descriptor on success, otherwise an error code.
*/
static int do_eventfd(unsigned int count, int flags)
{
struct dfs_file *file;
int fd;
int status;
rt_ssize_t ret = 0;
if (flags & ~EFD_FLAGS_SET)
{
rt_set_errno(EINVAL);
return -1;
}
fd = fd_new();
if (fd >= 0)
{
ret = fd;
file = fd_get(fd);
status = rt_eventfd_create(file, count, flags);
if (status < 0)
{
fd_release(fd);
rt_set_errno(-status);
ret = -1;
}
}
else
{
rt_set_errno(-fd);
ret = -1;
}
return ret;
}
/**
* @brief Creates an event file descriptor with the specified count and flags.
* @param count Initial value of the event counter.
* @param flags Flags for the event file descriptor.
* @return File descriptor on success, otherwise an error code.
*/
int eventfd(unsigned int count, int flags)
{
return do_eventfd(count, flags);
}

View File

@@ -0,0 +1,16 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-20 zmq810150896 First version
*/
#ifndef __EVENTFD_H__
#define __EVENTFD_H__
int eventfd(unsigned int count, int flags);
#endif /* __EVENTFD_H__ */
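
A brief usage sketch (illustrative, helper name is a placeholder): the counter behaves as a 64-bit accumulator; write() adds to it and, without EFD_SEMAPHORE, read() returns the whole count and resets it to zero. Both transfers must be at least 8 bytes.

#include <stdint.h>
#include <unistd.h>
#include "eventfd.h"

static void eventfd_sample(void)
{
    uint64_t val;
    int efd = eventfd(0, 0);         /* initial count 0, blocking mode */
    if (efd < 0)
        return;

    val = 2;
    write(efd, &val, sizeof(val));   /* counter becomes 2 */

    read(efd, &val, sizeof(val));    /* val == 2, counter reset to 0 */

    close(efd);
}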

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = ['mman.c']
CPPPATH = [cwd]
group = DefineGroup('POSIX', src, depend = ['RT_USING_POSIX_MMAN'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/11/30 Bernard The first version.
* 2024/03/29 TroyMitchelle Add all function comments
*/
#include <stdint.h>
#include <stdio.h>
#include <rtthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/errno.h>
#include "sys/mman.h"
/**
* @brief Maps a region of memory into the calling process's address space.
* @param addr Desired starting address of the mapping.
* @param length Length of the mapping.
* @param prot Protection of the mapped memory region.
* @param flags Type of the mapped memory region.
* @param fd File descriptor of the file to be mapped.
* @param offset Offset within the file to start the mapping.
* @return Upon success, returns a pointer to the mapped region; otherwise, MAP_FAILED is returned.
*/
void *mmap(void *addr, size_t length, int prot, int flags,
int fd, off_t offset)
{
uint8_t *mem;
if (addr)
{
mem = addr;
}
else mem = (uint8_t *)malloc(length);
if (mem)
{
off_t cur;
size_t read_bytes;
cur = lseek(fd, 0, SEEK_SET);
lseek(fd, offset, SEEK_SET);
read_bytes = read(fd, mem, length);
if (read_bytes != length)
{
if (addr == RT_NULL)
{
/* read failed */
free(mem);
mem = RT_NULL;
}
}
lseek(fd, cur, SEEK_SET);
return mem;
}
errno = ENOMEM;
return MAP_FAILED;
}
/**
* @brief Unmaps a mapped region of memory.
* @param addr Starting address of the mapping to be unmapped.
* @param length Length of the mapping.
* @return Upon success, returns 0; otherwise, -1 is returned.
*/
int munmap(void *addr, size_t length)
{
if (addr)
{
free(addr);
return 0;
}
return -1;
}
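
Since this mmap() has no MMU backing, it simply copies length bytes of the file into a malloc()ed buffer (or into addr if one is supplied), and munmap() frees that buffer. An illustrative sketch with a placeholder path follows; note that a short read makes this implementation return RT_NULL rather than MAP_FAILED, so both are checked.

#include <fcntl.h>
#include <unistd.h>
#include "sys/mman.h"

static void mmap_sample(void)
{
    int fd = open("/data.bin", O_RDONLY);
    if (fd < 0)
        return;

    void *p = mmap(NULL, 128, PROT_READ, MAP_PRIVATE, fd, 0);
    if (p && p != MAP_FAILED)
    {
        /* p is a heap copy of the first 128 bytes of the file */
        munmap(p, 128);
    }
    close(fd);
}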

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/11/30 Bernard The first version.
* 2024/03/29 TroyMitchelle Add comments for all macros
*/
#ifndef __SYS_MMAN_H__
#define __SYS_MMAN_H__
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/types.h>
#define MAP_FAILED ((void *) -1)
/* mmap flags */
#define MAP_SHARED 0x01 /**< Share the mapping with other processes. */
#define MAP_PRIVATE 0x02 /**< Create a private copy-on-write mapping. */
#define MAP_TYPE 0x0f /**< Mask for type of mapping. */
#define MAP_FIXED 0x10 /**< Interpret addr exactly. */
#define MAP_ANON 0x20 /**< Anonymous mapping. */
#define MAP_ANONYMOUS MAP_ANON /**< Synonym for MAP_ANON. */
#define MAP_NORESERVE 0x4000 /**< Don't reserve swap space for this mapping. */
#define MAP_GROWSDOWN 0x0100 /**< Stack-like segment. */
#define MAP_DENYWRITE 0x0800 /**< ETXTBSY. */
#define MAP_EXECUTABLE 0x1000 /**< Mark it as an executable. */
#define MAP_LOCKED 0x2000 /**< Lock the mapping's pages. */
#define MAP_POPULATE 0x8000 /**< Populate (prefault) pagetables. */
#define MAP_NONBLOCK 0x10000 /**< Do not block on IO. */
#define MAP_STACK 0x20000 /**< Allocation is a stack segment. */
#define MAP_HUGETLB 0x40000 /**< Create a huge page mapping. */
#define MAP_FILE 0 /**< Compatibility */
/* mmap protections */
#define PROT_NONE 0 /**< No access. */
#define PROT_READ 1 /**< Page can be read. */
#define PROT_WRITE 2 /**< Page can be written. */
#define PROT_EXEC 4 /**< Page can be executed. */
#define PROT_GROWSDOWN 0x01000000/**< Extend change to start of growsdown vma (mprotect only). */
#define PROT_GROWSUP 0x02000000/**< Extend change to start of growsup vma (mprotect only). */
/* msync flags */
#define MS_ASYNC 1 /**< Perform asynchronous writes. */
#define MS_INVALIDATE 2 /**< Invalidate mappings after writing. */
#define MS_SYNC 4 /**< Perform synchronous writes. */
/* mlockall flags */
#define MCL_CURRENT 1 /**< Lock all pages which are currently mapped into the address space of the process. */
#define MCL_FUTURE 2 /**< Lock all pages which will become mapped into the address space of the process in the future. */
#define MCL_ONFAULT 4 /**< Lock all pages which are currently mapped into the address space of the process on access. */
void *mmap (void *start, size_t len, int prot, int flags, int fd, off_t off);
int munmap (void *start, size_t len);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,17 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend('RT_USING_POSIX_POLL'):
src += ['poll.c']
if GetDepend('RT_USING_POSIX_SELECT'):
src += ['select.c']
group = DefineGroup('POSIX', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,359 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-12-28 Bernard first version
* 2018-03-09 Bernard Add protection for pt->triggered.
* 2023-12-04 Shell Fix return code and error verification
* 2023-12-14 Shell When poll goes to sleep before the waitqueue has added a
* record and finished enumerating all the fd's, it may be
* incorrectly woken up. This is basically because the poll
* mechanism wakeup algorithm does not correctly distinguish
* the current wait state.
* 2024-03-29 TroyMitchelle Add all function comments and comments to structure members
*/
#include <stdint.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include "poll.h"
enum rt_poll_status
{
RT_POLL_STAT_INIT, /**< Poll operation initialization status. */
RT_POLL_STAT_TRIG, /**< Poll operation triggered status. */
RT_POLL_STAT_WAITING /**< Poll operation waiting status. */
};
struct rt_poll_table
{
rt_pollreq_t req; /**< Poll request. */
enum rt_poll_status status; /**< Status of the poll operation. */
rt_thread_t polling_thread; /**< Polling thread associated with the table. */
struct rt_poll_node *nodes; /**< Linked list of poll nodes. */
};
struct rt_poll_node
{
struct rt_wqueue_node wqn; /**< Wait queue node for the poll node. */
struct rt_poll_table *pt; /**< Pointer to the parent poll table. */
struct rt_poll_node *next; /**< Pointer to the next poll node. */
};
static RT_DEFINE_SPINLOCK(_spinlock);
/**
* @brief Wake-up function for the wait queue.
*
* This function is invoked when a node in the wait queue needs to be woken up.
*
* @param wait Pointer to the wait queue node.
* @param key Key associated with the wake-up operation.
* @return Upon successful wake-up, returns 0; otherwise, -1 is returned.
*/
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
rt_ubase_t level;
struct rt_poll_node *pn;
int is_waiting;
if (key && !((rt_ubase_t)key & wait->key))
return -1;
pn = rt_container_of(wait, struct rt_poll_node, wqn);
level = rt_spin_lock_irqsave(&_spinlock);
is_waiting = (pn->pt->status == RT_POLL_STAT_WAITING);
pn->pt->status = RT_POLL_STAT_TRIG;
rt_spin_unlock_irqrestore(&_spinlock, level);
if (is_waiting)
return __wqueue_default_wake(wait, key);
return -1;
}
/**
* @brief Adds a poll request to the wait queue.
*
* This function adds a poll request to the wait queue associated with the specified
* wait queue and poll request.
*
* @param wq Pointer to the wait queue.
* @param req Pointer to the poll request.
*/
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
struct rt_poll_table *pt;
struct rt_poll_node *node;
node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
if (node == RT_NULL)
return;
pt = rt_container_of(req, struct rt_poll_table, req);
node->wqn.key = req->_key;
rt_list_init(&(node->wqn.list));
node->wqn.polling_thread = pt->polling_thread;
node->wqn.wakeup = __wqueue_pollwake;
node->next = pt->nodes;
node->pt = pt;
pt->nodes = node;
rt_wqueue_add(wq, &node->wqn);
}
/**
* @brief Initializes a poll table.
*
* This function initializes a poll table with the provided poll request, status,
* and polling thread.
*
* @param pt Pointer to the poll table to be initialized.
*/
static void poll_table_init(struct rt_poll_table *pt)
{
pt->req._proc = _poll_add;
pt->status = RT_POLL_STAT_INIT;
pt->nodes = RT_NULL;
pt->polling_thread = rt_thread_self();
}
/**
* @brief Waits for events on the poll table with a specified timeout.
*
* This function waits for events on the poll table with the specified timeout
* in milliseconds.
*
* @param pt Pointer to the poll table.
* @param msec Timeout value in milliseconds.
* @return Upon successful completion, returns 0. If the timeout expires, -RT_ETIMEOUT
* is returned. If the operation is interrupted by a signal, -RT_EINTR is
* returned.
*/
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
rt_int32_t timeout;
int ret = 0;
struct rt_thread *thread;
rt_base_t level;
thread = pt->polling_thread;
timeout = rt_tick_from_millisecond(msec);
level = rt_spin_lock_irqsave(&_spinlock);
if (timeout != 0 && pt->status != RT_POLL_STAT_TRIG)
{
if (rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE) == RT_EOK)
{
if (timeout > 0)
{
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&timeout);
rt_timer_start(&(thread->thread_timer));
rt_set_errno(RT_ETIMEOUT);
}
else
{
rt_set_errno(0);
}
pt->status = RT_POLL_STAT_WAITING;
rt_spin_unlock_irqrestore(&_spinlock, level);
rt_schedule();
level = rt_spin_lock_irqsave(&_spinlock);
if (pt->status == RT_POLL_STAT_WAITING)
pt->status = RT_POLL_STAT_INIT;
}
}
ret = rt_get_errno();
if (ret == RT_EINTR)
ret = -RT_EINTR;
else if (pt->status == RT_POLL_STAT_TRIG)
ret = RT_EOK;
else
ret = -RT_ETIMEOUT;
rt_spin_unlock_irqrestore(&_spinlock, level);
return ret;
}
/**
* @brief Performs poll operation for a single file descriptor.
*
* This function performs a poll operation for a single file descriptor and updates
* the revents field of the pollfd structure accordingly.
*
* @param pollfd Pointer to the pollfd structure.
* @param req Pointer to the poll request.
* @return Upon successful completion, returns the bitmask of events that occurred.
* If an error occurs, -1 is returned.
*/
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
int mask = 0;
int fd;
fd = pollfd->fd;
if (fd >= 0)
{
struct dfs_file *f = fd_get(fd);
mask = POLLNVAL;
if (f)
{
mask = POLLMASK_DEFAULT;
if (f->vnode->fops->poll)
{
req->_key = pollfd->events | POLLERR | POLLHUP;
mask = f->vnode->fops->poll(f, req);
/* deal with the device returning an error (-1) */
if (mask < 0)
{
pollfd->revents = 0;
return mask;
}
}
/* Mask out unneeded events. */
mask &= pollfd->events | POLLERR | POLLHUP;
}
}
pollfd->revents = mask;
return mask;
}
/**
* @brief Performs the poll operation on an array of file descriptors.
*
* This function performs the poll operation on an array of file descriptors and
* waits for events with the specified timeout.
*
* @param fds Pointer to the array of pollfd structures.
* @param nfds Number of file descriptors in the array.
* @param pt Pointer to the poll table.
* @param msec Timeout value in milliseconds.
* @return Upon successful completion, returns the number of file descriptors
* for which events were received. If the timeout expires, -RT_ETIMEOUT
* is returned. If the operation is interrupted by a signal, -RT_EINTR is
* returned.
*/
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
int num;
int istimeout = 0;
nfds_t n;
struct pollfd *pf;
int ret = 0;
if (msec == 0)
{
pt->req._proc = RT_NULL;
istimeout = 1;
}
while (1)
{
pf = fds;
num = 0;
pt->status = RT_POLL_STAT_INIT;
for (n = 0; n < nfds; n ++)
{
ret = do_pollfd(pf, &pt->req);
if(ret < 0)
{
/* deal with the device returning an error (-1) */
pt->req._proc = RT_NULL;
return ret;
}
else if(ret > 0)
{
num ++;
pt->req._proc = RT_NULL;
}
pf ++;
}
pt->req._proc = RT_NULL;
if (num || istimeout)
break;
ret = poll_wait_timeout(pt, msec);
if (ret == -RT_EINTR)
return -EINTR;
else if (ret == -RT_ETIMEOUT)
istimeout = 1;
else
istimeout = 0;
}
return num;
}
/**
* @brief Tears down the poll table.
*
* This function tears down the poll table by removing all poll nodes associated
* with it.
*
* @param pt Pointer to the poll table.
*/
static void poll_teardown(struct rt_poll_table *pt)
{
struct rt_poll_node *node, *next;
next = pt->nodes;
while (next)
{
node = next;
rt_wqueue_remove(&node->wqn);
next = node->next;
rt_free(node);
}
}
/**
* @brief Performs the poll operation on a set of file descriptors.
*
* This function performs the poll operation on a set of file descriptors and
* waits for events with the specified timeout.
*
* @param fds Pointer to the array of pollfd structures.
* @param nfds Number of file descriptors in the array.
* @param timeout Timeout value in milliseconds.
* @return Upon successful completion, returns the number of file descriptors
* for which events were received. If the timeout expires, 0 is returned.
* If an error occurs, -1 is returned.
*/
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
int num;
struct rt_poll_table table;
poll_table_init(&table);
num = poll_do(fds, nfds, &table, timeout);
poll_teardown(&table);
return num;
}
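
From the caller's point of view the machinery above reduces to: fill a pollfd array, call poll(), and inspect revents. A minimal sketch (illustrative; the descriptor is assumed to be open and its driver to implement the fops->poll hook):

#include <poll.h>

static int poll_sample(int fd)
{
    struct pollfd pfd;

    pfd.fd      = fd;
    pfd.events  = POLLIN;
    pfd.revents = 0;

    int ret = poll(&pfd, 1, 1000);       /* wait up to 1000 ms */
    if (ret > 0 && (pfd.revents & POLLIN))
        return 1;                        /* readable */
    return 0;                            /* timeout (0) or error (<0) */
}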

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-11 Meco Man First version
* 2024-03-29 TroyMitchelle Add all macro comments and comments to structure members
*/
#ifndef __POLL_H__
#define __POLL_H__
#ifdef __cplusplus
extern "C" {
#endif
#ifdef RT_USING_MUSLLIBC
#if !defined(POLLIN) && !defined(POLLOUT)
#define POLLIN 0x001 /**< There is data to read. */
#define POLLPRI 0x002 /**< There is urgent data to read. */
#define POLLOUT 0x004 /**< Writing is now possible. */
#define POLLERR 0x008 /**< Error condition. */
#define POLLHUP 0x010 /**< Hang up. */
#define POLLNVAL 0x020 /**< Invalid polling request. */
#define POLLRDNORM 0x040 /**< Normal data may be read. */
#define POLLRDBAND 0x080 /**< Priority data may be read. */
#define POLLWRNORM 0x100 /**< Writing normal data is possible. */
#define POLLWRBAND 0x200 /**< Writing priority data is possible. */
typedef unsigned int nfds_t;
struct pollfd
{
int fd; /**< File descriptor. */
short events; /**< Requested events. */
short revents; /**< Returned events. */
};
#endif
#else
#if !defined(POLLIN) && !defined(POLLOUT)
#define POLLIN 0x1 /**< There is data to read. */
#define POLLOUT 0x2 /**< Writing is now possible. */
#define POLLERR 0x4 /**< Error condition. */
#define POLLNVAL 0x8 /**< Invalid polling request. */
/* Below values are unimplemented */
#define POLLRDNORM 0x10 /**< Normal data may be read. */
#define POLLRDBAND 0x20 /**< Priority data may be read. */
#define POLLPRI 0x40 /**< There is urgent data to read. */
#define POLLWRNORM 0x80 /**< Writing normal data is possible. */
#define POLLWRBAND 0x100 /**< Writing priority data is possible. */
#define POLLHUP 0x200 /**< Hang up. */
typedef unsigned int nfds_t;
struct pollfd
{
int fd; /**< File descriptor. */
short events; /**< Requested events. */
short revents; /**< Returned events. */
};
#endif
#endif /* RT_USING_MUSLLIBC */
#define POLLMASK_DEFAULT (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
int poll(struct pollfd *fds, nfds_t nfds, int timeout);
#ifdef __cplusplus
}
#endif
#endif /* __POLL_H__ */

View File

@@ -0,0 +1,193 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-12-28 Bernard first version
* 2024-04-08 TroyMitchell Add all function comments
*/
#include <rtthread.h>
#include <poll.h>
#include <sys/select.h>
/**
* @brief Initialize the file descriptor set to have zero bits for all file descriptors.
* @param set Pointer to the file descriptor set to be initialized.
* @param nfds The maximum file descriptor in the set plus one.
* @note The actual size of the 'fd_set' is determined based on the parameter 'nfds'.
*/
static void fdszero(fd_set *set, int nfds)
{
fd_mask *m;
int n;
/*
The 'sizeof(fd_set)' of the system space may differ from user space,
so the actual size of the 'fd_set' is determined here with the parameter 'nfds'
*/
m = (fd_mask *)set;
for (n = 0; n < nfds; n += (sizeof(fd_mask) * 8))
{
rt_memset(m, 0, sizeof(fd_mask));
m ++;
}
}
/**
* @brief Synchronous I/O multiplexing: multiplex input/output over a set of file descriptors.
* @param nfds The highest-numbered file descriptor in any of the three sets, plus 1.
* @param readfds A pointer to a set of file descriptors to be checked for read readiness.
* @param writefds A pointer to a set of file descriptors to be checked for write readiness.
* @param exceptfds A pointer to a set of file descriptors to be checked for exceptions.
* @param timeout The maximum time to wait for any of the specified file descriptors to become ready.
* @return Upon successful completion, the total number of file descriptors in all the sets that are ready for the requested operation is returned; otherwise, -1 is returned on error.
*/
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout)
{
int fd;
int npfds;
int msec;
int ndx;
int ret;
struct pollfd *pollset = RT_NULL;
/* How many pollfd structures do we need to allocate? */
for (fd = 0, npfds = 0; fd < nfds; fd++)
{
/* Check if any monitor operation is requested on this fd */
if ((readfds && FD_ISSET(fd, readfds)) ||
(writefds && FD_ISSET(fd, writefds)) ||
(exceptfds && FD_ISSET(fd, exceptfds)))
{
npfds++;
}
}
/* Allocate the descriptor list for poll() */
if (npfds > 0)
{
pollset = (struct pollfd *)rt_calloc(npfds, sizeof(struct pollfd));
if (!pollset)
{
return -1;
}
}
/* Initialize the descriptor list for poll() */
for (fd = 0, ndx = 0; fd < nfds; fd++)
{
int incr = 0;
/* The readfds set holds the set of FDs that the caller can be assured
* of reading from without blocking. Note that POLLHUP is included as
* a read-able condition. POLLHUP will be reported at the end-of-file
* or when a connection is lost. In either case, the read() can then
* be performed without blocking.
*/
if (readfds && FD_ISSET(fd, readfds))
{
pollset[ndx].fd = fd;
pollset[ndx].events |= POLLIN;
incr = 1;
}
if (writefds && FD_ISSET(fd, writefds))
{
pollset[ndx].fd = fd;
pollset[ndx].events |= POLLOUT;
incr = 1;
}
if (exceptfds && FD_ISSET(fd, exceptfds))
{
pollset[ndx].fd = fd;
incr = 1;
}
ndx += incr;
}
RT_ASSERT(ndx == npfds);
/* Convert the timeout to milliseconds */
if (timeout)
{
msec = (int)timeout->tv_sec * 1000 + (int)timeout->tv_usec / 1000;
}
else
{
msec = -1;
}
/* Then let poll do all of the real work. */
ret = poll(pollset, npfds, msec);
/* Now set up the return values */
if (readfds)
{
fdszero(readfds, nfds);
}
if (writefds)
{
fdszero(writefds, nfds);
}
if (exceptfds)
{
fdszero(exceptfds, nfds);
}
/* Convert the poll descriptor list back into select's three bit sets */
if (ret > 0)
{
ret = 0;
for (ndx = 0; ndx < npfds; ndx++)
{
/* Check for read conditions. Note that POLLHUP is included as a
* read condition. POLLHUP will be reported when no more data will
* be available (such as when a connection is lost). In either
* case, the read() can then be performed without blocking.
*/
if (readfds)
{
if (pollset[ndx].revents & (POLLIN | POLLHUP))
{
FD_SET(pollset[ndx].fd, readfds);
ret++;
}
}
/* Check for write conditions */
if (writefds)
{
if (pollset[ndx].revents & POLLOUT)
{
FD_SET(pollset[ndx].fd, writefds);
ret++;
}
}
/* Check for exceptions */
if (exceptfds)
{
if (pollset[ndx].revents & POLLERR)
{
FD_SET(pollset[ndx].fd, exceptfds);
ret++;
}
}
}
}
if (pollset) rt_free(pollset);
return ret;
}
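
The same one-second wait expressed through select(), which this file forwards to poll() internally (illustrative sketch; the descriptor is assumed to be open):

#include <stddef.h>
#include <sys/select.h>

static int select_sample(int fd)
{
    fd_set rset;
    struct timeval tv;

    FD_ZERO(&rset);
    FD_SET(fd, &rset);
    tv.tv_sec  = 1;
    tv.tv_usec = 0;

    int ret = select(fd + 1, &rset, NULL, NULL, &tv);
    return (ret > 0 && FD_ISSET(fd, &rset)) ? 1 : 0;
}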

View File

@@ -0,0 +1,14 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend('RT_USING_POSIX_SIGNALFD'):
src += ['signalfd.c']
group = DefineGroup('POSIX', src, depend = ['RT_USING_SMART','RT_USING_POSIX_SIGNALFD'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,385 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-29 zmq810150896 first version
* 2024-04-08 TroyMitchell Add all function comments
*/
#include <rtthread.h>
#include <sys/signalfd.h>
#include <dfs_file.h>
#include <signal.h>
#include <rthw.h>
#include <sys/time.h>
#include <lwp_signal.h>
#include <lwp.h>
#include <poll.h>
#define SIGNALFD_MUTEX_NAME "signalfd"
#define SIGINFO_MAX 10
#define SIGNALFD_SHART_MAX RT_SIGNALFD_MAX_NUM
static int is_head_init = 0;
struct rt_signalfd_ctx
{
sigset_t sigmask;
struct rt_mutex lock;
siginfo_t info[SIGINFO_MAX];
int sig_num;
rt_wqueue_t signalfd_queue;
struct rt_lwp *lwp[SIGNALFD_SHART_MAX];
};
static int signalfd_close(struct dfs_file *file);
static int signalfd_poll(struct dfs_file *file, struct rt_pollreq *req);
#ifndef RT_USING_DFS_V2
static ssize_t signalfd_read(struct dfs_file *file, void *buf, size_t count);
#else
static ssize_t signalfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
#endif
static int signalfd_add_notify(struct rt_signalfd_ctx *sfd);
static const struct dfs_file_ops signalfd_fops =
{
.close = signalfd_close,
.poll = signalfd_poll,
.read = signalfd_read,
};
/**
* @brief Closes the file descriptor associated with a signalfd file.
* @param file Pointer to the file structure.
* @return Upon successful completion, returns 0; otherwise, returns an error code.
*/
static int signalfd_close(struct dfs_file *file)
{
struct rt_signalfd_ctx *sfd;
if (file->vnode->ref_count != 1)
return 0;
sfd = file->vnode->data;
if (sfd)
{
rt_mutex_detach(&sfd->lock);
rt_free(sfd);
}
return 0;
}
/**
* @brief Adds a signalfd file descriptor to the poll queue and checks for pending events.
* @param file Pointer to the file structure.
* @param req Pointer to the poll request structure.
* @return The events that are ready on the file descriptor.
*/
static int signalfd_poll(struct dfs_file *file, struct rt_pollreq *req)
{
struct rt_signalfd_ctx *sfd;
int events = 0;
if (file->vnode)
{
sfd = file->vnode->data;
rt_poll_add(&sfd->signalfd_queue, req);
signalfd_add_notify(sfd);
rt_mutex_take(&sfd->lock, RT_WAITING_FOREVER);
if (sfd->sig_num)
events |= POLLIN;
rt_mutex_release(&sfd->lock);
}
return events;
}
#ifndef RT_USING_DFS_V2
/**
* @brief Reads signals from a signalfd file descriptor.
* @param file Pointer to the file structure.
* @param buf Pointer to the buffer to store the signals.
* @param count Maximum number of bytes to read.
* @return Upon successful completion, returns the number of bytes read; otherwise, returns an error code.
*/
static ssize_t signalfd_read(struct dfs_file *file, void *buf, size_t count)
#else
/**
* @brief Reads signals from a signalfd file descriptor with file offset.
* @param file Pointer to the file structure.
* @param buf Pointer to the buffer to store the signals.
* @param count Maximum number of bytes to read.
* @param pos Pointer to the file offset.
* @return Upon successful completion, returns the number of bytes read; otherwise, returns a negative error code.
*/
static ssize_t signalfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
#endif
{
struct rt_signalfd_ctx *sfd = RT_NULL;
struct signalfd_siginfo *buffer = RT_NULL;
int user_buf_num = 0;
int sig_num = 0;
int i = 0;
rt_err_t ret = -1;
if (sizeof(struct signalfd_siginfo) > count)
return -1;
if (buf == RT_NULL)
return -1;
buffer = (struct signalfd_siginfo *)buf;
user_buf_num = count / sizeof(struct signalfd_siginfo);
if (file->vnode)
{
sfd = file->vnode->data;
signalfd_add_notify(sfd);
if ((sfd->sig_num == 0) && (file->flags & O_NONBLOCK))
{
ret = -EAGAIN;
}
else
{
if (sfd->sig_num == 0)
{
rt_wqueue_wait(&sfd->signalfd_queue, 0, RT_WAITING_FOREVER);
}
rt_mutex_take(&sfd->lock, RT_WAITING_FOREVER);
for (i = 0; i < sfd->sig_num; i++)
{
if (i < user_buf_num)
{
memcpy(&buffer[i], &sfd->info[i], sizeof(struct signalfd_siginfo));
sfd->sig_num -= 1;
sig_num += 1;
}
else
{
break;
}
}
for (int j = 0; j < sfd->sig_num; j ++)
{
memcpy(&sfd->info[j], &sfd->info[i ++], sizeof(struct signalfd_siginfo));
}
rt_mutex_release(&sfd->lock);
ret = sizeof(struct signalfd_siginfo) * sig_num;
}
}
return ret;
}
/**
* @brief Callback function for signalfd file descriptor notifications.
* @param signalfd_queue Pointer to the signalfd queue.
* @param signum Signal number.
*/
static void signalfd_callback(rt_wqueue_t *signalfd_queue, int signum)
{
struct rt_signalfd_ctx *sfd;
sfd = rt_container_of(signalfd_queue, struct rt_signalfd_ctx, signalfd_queue);
if (sfd)
{
if (sigismember(&sfd->sigmask, signum))
{
rt_mutex_take(&sfd->lock, RT_WAITING_FOREVER);
if (sfd->sig_num < SIGINFO_MAX)
{
sfd->info[sfd->sig_num].si_signo = signum;
sfd->sig_num += 1;
}
rt_mutex_release(&sfd->lock);
rt_wqueue_wakeup(signalfd_queue, (void*)POLLIN);
}
}
}
/**
* @brief Adds a signal file descriptor notification.
* @param sfd Pointer to the signalfd context.
* @return Upon successful completion, returns 0; otherwise, returns an error code.
*/
static int signalfd_add_notify(struct rt_signalfd_ctx *sfd)
{
struct rt_lwp_notify *lwp_notify;
rt_err_t ret = -1;
rt_slist_t *node;
int is_lwp = 0;
rt_mutex_take(&sfd->lock, RT_WAITING_FOREVER);
for (int i = 0; i < is_head_init; i++)
{
if (sfd->lwp[i])
{
if (sfd->lwp[i] == lwp_self())
{
is_lwp = 1;
}
}
}
if (is_lwp == 0)
{
sfd->lwp[is_head_init] = lwp_self();
if (is_head_init == 0)
{
rt_slist_init(&sfd->lwp[is_head_init]->signalfd_notify_head);
}
lwp_notify = (struct rt_lwp_notify *)rt_malloc(sizeof(struct rt_lwp_notify));
if (lwp_notify)
{
lwp_notify->notify = signalfd_callback;
lwp_notify->signalfd_queue = &sfd->signalfd_queue;
rt_slist_append(&sfd->lwp[is_head_init]->signalfd_notify_head, &(lwp_notify->list_node));
is_head_init ++;
ret = 0;
}
else
{
rt_slist_for_each(node, &sfd->lwp[is_head_init]->signalfd_notify_head)
{
struct rt_lwp_notify *n = rt_slist_entry(node, struct rt_lwp_notify, list_node);
rt_slist_remove(&sfd->lwp[is_head_init]->signalfd_notify_head, &n->list_node);
rt_free(n);
}
rt_set_errno(ENOMEM);
}
}
rt_mutex_release(&sfd->lock);
return ret;
}
/**
* @brief Creates a new signalfd file descriptor or modifies an existing one.
* @param fd File descriptor to modify (-1 to create a new one).
* @param mask Signal mask.
* @param flags File descriptor flags.
 * @return Upon successful completion, returns the file descriptor number; otherwise, returns -1.
*/
static int signalfd_do(int fd, const sigset_t *mask, int flags)
{
struct dfs_file *df;
struct rt_signalfd_ctx *sfd;
rt_err_t ret = 0;
if (fd == -1)
{
fd = fd_new();
if (fd < 0)
return -1;
ret = fd;
df = fd_get(fd);
if (df)
{
sfd = (struct rt_signalfd_ctx *)rt_malloc(sizeof(struct rt_signalfd_ctx));
if (sfd)
{
df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
if (df->vnode)
{
dfs_vnode_init(df->vnode, FT_REGULAR, &signalfd_fops);
df->vnode->data = sfd;
for (int i = 0; i < is_head_init; i++)
{
sfd->lwp[i] = RT_NULL;
}
sigemptyset(&sfd->sigmask);
memcpy(&sfd->sigmask, mask, sizeof(sigset_t));
rt_mutex_init(&sfd->lock, SIGNALFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
rt_wqueue_init(&sfd->signalfd_queue);
if (signalfd_add_notify(sfd) < 0)
{
is_head_init = 0;
fd_release(fd);
rt_free(sfd);
ret = -1;
}
sfd->sig_num = 0;
df->flags |= flags;
#ifdef RT_USING_DFS_V2
df->fops = &signalfd_fops;
#endif
}
else
{
fd_release(fd);
rt_free(sfd);
ret = -1;
}
}
else
{
fd_release(fd);
ret = -1;
}
}
else
{
fd_release(fd);
}
}
else
{
df = fd_get(fd);
if (df)
{
sfd = df->vnode->data;
df->flags = flags;
sigemptyset(&sfd->sigmask);
memcpy(&sfd->sigmask, mask, sizeof(sigset_t));
ret = fd;
}
else
{
rt_set_errno(EBADF);
ret = -1;
}
}
return ret;
}
/**
* @brief Creates a new signalfd file descriptor or modifies an existing one.
* @param fd File descriptor to modify (-1 to create a new one).
* @param mask Signal mask.
* @param flags File descriptor flags.
 * @return Upon successful completion, returns the file descriptor number; otherwise, returns -1.
*/
int signalfd(int fd, const sigset_t *mask, int flags)
{
return signalfd_do(fd, mask, flags);
}
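/*
 * Usage sketch (illustrative only, not part of the implementation above):
 * block SIGUSR1, route it to a signalfd descriptor and read it back.
 * Assumes sigprocmask() and SIGUSR1 are available in this configuration.
 *
 *   sigset_t mask;
 *   struct signalfd_siginfo info;
 *   int sfd;
 *
 *   sigemptyset(&mask);
 *   sigaddset(&mask, SIGUSR1);
 *   sigprocmask(SIG_BLOCK, &mask, RT_NULL);      // stop normal delivery
 *
 *   sfd = signalfd(-1, &mask, 0);                // create a new descriptor
 *   if (sfd >= 0 &&
 *       read(sfd, &info, sizeof(info)) == (ssize_t)sizeof(info))
 *   {
 *       rt_kprintf("received signal %d\n", info.si_signo);
 *   }
 */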

View File

@@ -0,0 +1,6 @@
from building import *
src = ['stdio.c']
group = DefineGroup('POSIX', src, depend = ['RT_USING_POSIX_STDIO'], CPPPATH = [GetCurrentDir()])
Return('group')

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/10/15 bernard the first version
*/
#ifndef __POSIX_STDIO_H__
#define __POSIX_STDIO_H__
#include <rtconfig.h>
#include <stdio.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
int rt_posix_stdio_init(void);
int rt_posix_stdio_get_console(void);
int rt_posix_stdio_set_console(const char* device_name, int mode);
ssize_t getdelim(char **lineptr, size_t *n, int delim, FILE *stream);
ssize_t getline(char **lineptr, size_t *n, FILE *stream);
#ifdef __cplusplus
}
#endif
#endif /* __POSIX_STDIO_H__ */

View File

@@ -0,0 +1,275 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/10/15 bernard the first version
* 2023/08/07 Meco Man rename as posix/stdio.c
*/
#include <rtthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/errno.h>
#include "posix/stdio.h"
#define STDIO_DEVICE_NAME_MAX 32
int sys_dup2(int oldfd, int new);
int rt_posix_stdio_init(void)
{
rt_device_t dev_console;
dev_console = rt_console_get_device();
if (dev_console)
{
int fd = rt_posix_stdio_set_console(dev_console->parent.name, O_RDWR);
if (fd < 0)
{
return -1;
}
/* set fd (0, 1, 2) */
sys_dup2(fd, 0);
sys_dup2(fd, 1);
sys_dup2(fd, 2);
}
return 0;
}
INIT_ENV_EXPORT(rt_posix_stdio_init);
#if defined(RT_USING_NEWLIBC)
#define NEWLIB_VERSION_NUM (__NEWLIB__ * 10000U + __NEWLIB_MINOR__ * 100U + __NEWLIB_PATCHLEVEL__)
static FILE* std_console = NULL;
int rt_posix_stdio_set_console(const char* device_name, int mode)
{
FILE *fp;
char name[STDIO_DEVICE_NAME_MAX];
char *file_mode;
rt_snprintf(name, sizeof(name) - 1, "/dev/%s", device_name);
name[STDIO_DEVICE_NAME_MAX - 1] = '\0';
if (mode == O_RDWR)
{
file_mode = "r+";
}
else if (mode == O_WRONLY)
{
file_mode = "wb";
}
else
{
file_mode = "rb";
}
fp = fopen(name, file_mode);
if (fp)
{
setvbuf(fp, NULL, _IONBF, 0);
if (std_console)
{
fclose(std_console);
std_console = NULL;
}
std_console = fp;
if (mode == O_RDWR)
{
_GLOBAL_REENT->_stdin = std_console;
}
else
{
_GLOBAL_REENT->_stdin = NULL;
}
if (mode == O_RDONLY)
{
_GLOBAL_REENT->_stdout = NULL;
_GLOBAL_REENT->_stderr = NULL;
}
else
{
_GLOBAL_REENT->_stdout = std_console;
_GLOBAL_REENT->_stderr = std_console;
}
#if (NEWLIB_VERSION_NUM < 30400U) || (NEWLIB_VERSION_NUM >= 40000U && NEWLIB_VERSION_NUM < 40300U)
        _GLOBAL_REENT->__sdidinit = 1; /* __sdidinit is obsolete */
#endif
}
if (std_console)
return fileno(std_console);
return -1;
}
int rt_posix_stdio_get_console(void)
{
if (std_console)
return fileno(std_console);
else
return -1;
}
#elif defined(RT_USING_MUSLLIBC)
static FILE* std_console = NULL;
int rt_posix_stdio_set_console(const char* device_name, int mode)
{
FILE *fp;
char name[STDIO_DEVICE_NAME_MAX];
char *file_mode;
rt_snprintf(name, sizeof(name) - 1, "/dev/%s", device_name);
name[STDIO_DEVICE_NAME_MAX - 1] = '\0';
if (mode == O_RDWR) file_mode = "r+";
else if (mode == O_WRONLY) file_mode = "wb";
else file_mode = "rb";
fp = fopen(name, file_mode);
if (fp)
{
setvbuf(fp, NULL, _IONBF, 0);
if (std_console)
{
fclose(std_console);
std_console = NULL;
}
std_console = fp;
}
if (std_console)
{
int fd = fileno(std_console);
return fd;
}
return -1;
}
int rt_posix_stdio_get_console(void)
{
int ret = -1;
if (std_console)
{
ret = fileno(std_console);
}
return ret;
}
#else
static int std_fd = -1;
int rt_posix_stdio_set_console(const char* device_name, int mode)
{
int fd;
char name[STDIO_DEVICE_NAME_MAX];
rt_snprintf(name, sizeof(name) - 1, "/dev/%s", device_name);
name[STDIO_DEVICE_NAME_MAX - 1] = '\0';
fd = open(name, mode, 0);
if (fd >= 0)
{
if (std_fd >= 0)
{
close(std_fd);
}
std_fd = fd;
}
return std_fd;
}
int rt_posix_stdio_get_console(void) {
return std_fd;
}
#endif /* defined(RT_USING_NEWLIBC) */
ssize_t getdelim(char **lineptr, size_t *n, int delim, FILE *stream)
{
char *cur_pos, *new_lineptr;
size_t new_lineptr_len;
int c;
if (lineptr == NULL || n == NULL || stream == NULL)
{
errno = EINVAL;
return -1;
}
if (*lineptr == NULL)
{
*n = 128; /* init len */
if ((*lineptr = (char *)malloc(*n)) == NULL)
{
errno = ENOMEM;
return -1;
}
}
cur_pos = *lineptr;
for (;;)
{
c = getc(stream);
if (ferror(stream) || (c == EOF && cur_pos == *lineptr))
return -1;
if (c == EOF)
break;
if ((*lineptr + *n - cur_pos) < 2)
{
if (LONG_MAX / 2 < *n)
{
#ifdef EOVERFLOW
errno = EOVERFLOW;
#else
errno = ERANGE; /* no EOVERFLOW defined */
#endif
return -1;
}
new_lineptr_len = *n * 2;
if ((new_lineptr = (char *)realloc(*lineptr, new_lineptr_len)) == NULL)
{
errno = ENOMEM;
return -1;
}
cur_pos = new_lineptr + (cur_pos - *lineptr);
*lineptr = new_lineptr;
*n = new_lineptr_len;
}
*cur_pos++ = (char)c;
if (c == delim)
break;
}
*cur_pos = '\0';
return (ssize_t)(cur_pos - *lineptr);
}
ssize_t getline(char **lineptr, size_t *n, FILE *stream)
{
return getdelim(lineptr, n, '\n', stream);
}
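/*
 * Usage sketch (illustrative only): reading a file line by line with the
 * getline() defined above. "/text.txt" is just a placeholder path.
 *
 *   FILE *fp = fopen("/text.txt", "r");
 *   char *line = NULL;
 *   size_t len = 0;
 *   ssize_t n;
 *
 *   if (fp)
 *   {
 *       while ((n = getline(&line, &len, fp)) != -1)
 *       {
 *           rt_kprintf("%d bytes: %s", (int)n, line);
 *       }
 *       free(line);      // getline() allocates and grows the buffer
 *       fclose(fp);
 *   }
 */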

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = ['termios.c']
CPPPATH = [cwd]
group = DefineGroup('POSIX', src, depend = ['RT_USING_POSIX_TERMIOS'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,273 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017-08-30 Bernard The first version
* 2024-04-26 TroyMitchell Add comments for all functions
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/errno.h>
#include "termios.h"
#include <rtthread.h>
/**
* @brief Gets the current attributes of a terminal device.
* @param fd File descriptor of the terminal device.
* @param tio Pointer to a struct termios where the attributes will be stored.
* @return Upon successful completion, returns 0; otherwise, returns -1.
*
* @note This function retrieves the current attributes of a terminal device specified by the file descriptor fd.
* It uses the ioctl system call with the TCGETA command to obtain the attributes and stores them in the
* struct termios pointed to by tio.
* If the ioctl operation fails, the function returns -1 to indicate an error.
*/
int tcgetattr(int fd, struct termios *tio)
{
/* Get the current serial port settings. */
if (ioctl(fd, TCGETA, tio))
return -1;
return 0;
}
/**
* @brief Sets the attributes of a terminal device.
* @param fd File descriptor of the terminal device.
* @param act Action to be taken for the attribute change (TCSANOW, TCSADRAIN, or TCSAFLUSH).
* @param tio Pointer to a struct termios containing the new attributes.
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function sets the attributes of a terminal device specified by the file descriptor fd.
* The act parameter determines when the attribute change takes effect:
* - TCSANOW: Make the change immediately.
* - TCSADRAIN: Make the change after all currently written data has been transmitted.
* - TCSAFLUSH: Make the change after all currently written data has been transmitted, and discard
* any received but unread data.
* The new attributes are specified in the struct termios pointed to by tio.
* The ioctl system call is used to set the attributes based on the value of act.
* If the ioctl operation fails or an invalid action is specified, errno is set to indicate the error,
* and the function returns -1.
*/
int tcsetattr(int fd, int act, const struct termios *tio)
{
switch (act)
{
case TCSANOW:
/* make the change immediately */
return (ioctl(fd, TCSETA, (void*)tio));
case TCSADRAIN:
/*
* Don't make the change until all currently written data
* has been transmitted.
*/
return (ioctl(fd, TCSETAW, (void*)tio));
case TCSAFLUSH:
/* Don't make the change until all currently written data
* has been transmitted, at which point any received but
* unread data is also discarded.
*/
return (ioctl(fd, TCSETAF, (void*)tio));
default:
errno = EINVAL;
return (-1);
}
}
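/*
 * Usage sketch (illustrative only): turning local echo off on an already
 * opened terminal descriptor fd with the two calls above.
 *
 *   struct termios tio;
 *
 *   if (tcgetattr(fd, &tio) == 0)
 *   {
 *       tio.c_lflag &= ~ECHO;             // disable local echo
 *       tcsetattr(fd, TCSANOW, &tio);     // apply the change immediately
 *   }
 */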
/**
* @brief Gets the session ID of a terminal.
* @param fd File descriptor of the terminal device.
* @return Always returns 0.
*
* @note This function is a stub and always returns 0.
* In POSIX systems, tcgetsid() is used to get the session ID of the terminal associated with the file descriptor fd.
* However, this function does not provide this functionality and simply returns 0 as a placeholder.
*/
pid_t tcgetsid(int fd)
{
return 0;
}
/**
* @brief Gets the output baud rate from a termios structure.
* @param tio Pointer to a struct termios containing the terminal attributes.
* @return Output baud rate extracted from the terminal attributes.
*
* @note This function extracts the output baud rate from the termios structure pointed to by tio.
* It retrieves the baud rate from the c_cflag member of the termios structure using the CBAUD mask.
* The output baud rate is returned as a speed_t type.
*/
speed_t cfgetospeed(const struct termios *tio)
{
return tio->c_cflag & CBAUD;
}
/**
* @brief Gets the input baud rate from a termios structure.
* @param tio Pointer to a struct termios containing the terminal attributes.
* @return Input baud rate extracted from the terminal attributes.
*
* @note This function is a wrapper for the cfgetospeed() function.
* It returns the input baud rate by calling cfgetospeed() with the termios structure pointer tio.
*/
speed_t cfgetispeed(const struct termios *tio)
{
return cfgetospeed(tio);
}
/**
* @brief Sets the output baud rate in a termios structure.
* @param tio Pointer to a struct termios where the output baud rate will be set.
* @param speed Output baud rate to be set.
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function sets the output baud rate in the termios structure pointed to by tio.
* The speed parameter specifies the baud rate to be set.
* If the specified speed exceeds the CBAUD mask, indicating an invalid baud rate value,
* errno is set to EINVAL, and the function returns -1.
* Otherwise, the function clears the CBAUD bits in the c_cflag member of the termios structure
* and sets them to the specified speed value. It then returns 0 to indicate success.
*/
int cfsetospeed(struct termios *tio, speed_t speed)
{
if (speed & ~CBAUD)
{
errno = EINVAL;
return -1;
}
tio->c_cflag &= ~CBAUD;
tio->c_cflag |= speed;
return 0;
}
/**
* @brief Sets the input baud rate in a termios structure.
* @param tio Pointer to a struct termios where the input baud rate will be set.
* @param speed Input baud rate to be set.
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function sets the input baud rate in the termios structure pointed to by tio.
* The speed parameter specifies the input baud rate to be set.
* If the speed parameter is non-zero, indicating a valid baud rate, the function calls
* cfsetospeed() to set both the input and output baud rates to the specified value.
* If speed is zero, indicating that no baud rate needs to be set for input, the function returns 0
* without making any changes to the termios structure.
*/
int cfsetispeed(struct termios *tio, speed_t speed)
{
return speed ? cfsetospeed(tio, speed) : 0;
}
/**
* @brief Sends a break signal on a terminal.
* @param fd File descriptor of the terminal device.
* @param dur Duration of the break signal (ignored).
* @return Always returns 0.
*
* @note This function is a stub and does not send an actual break signal.
* Sending a break signal with a nonzero duration is implementation-defined,
* so this function ignores the duration parameter and always returns 0.
*/
int tcsendbreak(int fd, int dur)
{
/* nonzero duration is implementation-defined, so ignore it */
return 0;
}
/**
* @brief Flushes data from a terminal input or output queue.
* @param fd File descriptor of the terminal device.
* @param queue Queue to be flushed (TCIFLUSH, TCOFLUSH, or TCIOFLUSH).
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function flushes data from the input or output queue of the terminal device specified by the file descriptor fd.
* The queue parameter specifies which queue to flush:
* - TCIFLUSH: Flushes data from the input queue.
* - TCOFLUSH: Flushes data from the output queue.
* - TCIOFLUSH: Flushes data from both the input and output queues.
* The ioctl system call is used with the TCFLSH command to perform the flush operation.
* If the ioctl operation fails, errno is set to indicate the error, and the function returns -1.
*/
int tcflush(int fd, int queue)
{
return ioctl(fd, TCFLSH, (void*)(rt_ubase_t)queue);
}
/**
* @brief Controls the terminal flow control.
* @param fd File descriptor of the terminal device.
* @param action Action to be taken for flow control (TCOOFF, TCOON, TCIOFF, or TCION).
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function controls the flow of data on the terminal device specified by the file descriptor fd.
* The action parameter determines the flow control action to be taken:
* - TCOOFF: Suspends output transmission.
* - TCOON: Restarts output transmission.
* - TCIOFF: Suspends input transmission.
* - TCION: Restarts input transmission.
* The ioctl system call is used with the TCXONC command to perform the flow control operation.
* If the ioctl operation fails, errno is set to indicate the error, and the function returns -1.
*/
int tcflow(int fd, int action)
{
return ioctl(fd, TCXONC, (void*)(rt_ubase_t)action);
}
/**
* @brief Waits until all output written to the terminal is transmitted.
* @param fd File descriptor of the terminal device.
* @return Always returns 0.
*
* @note This function is a stub and does not perform any action.
* In POSIX systems, tcdrain() is used to wait until all output written to the terminal
* is transmitted, but this function does not provide this functionality and simply returns 0 as a placeholder.
*/
int tcdrain(int fd)
{
return 0;
}
/**
* @brief Sets the terminal attributes to raw mode.
* @param t Pointer to a struct termios where the terminal attributes will be set.
*
* @note This function sets the terminal attributes pointed to by t to raw mode.
* Raw mode disables special input and output processing features, making the terminal behave more like a data stream.
* The following modifications are made to the termios structure:
* - Input flags (c_iflag) are cleared of various processing flags.
* - Output flags (c_oflag) are cleared to disable output processing.
* - Local flags (c_lflag) are cleared to disable canonical mode, echoing, and signal handling.
* - Control flags (c_cflag) are modified to set character size to 8 bits and disable parity.
 *       - The VMIN and VTIME control characters are set to 1 and 0, respectively, so a read blocks until at least one byte of input is available.
*/
void cfmakeraw(struct termios *t)
{
t->c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP|INLCR|IGNCR|ICRNL|IXON);
t->c_oflag &= ~OPOST;
t->c_lflag &= ~(ECHO|ECHONL|ICANON|ISIG|IEXTEN);
t->c_cflag &= ~(CSIZE|PARENB);
t->c_cflag |= CS8;
t->c_cc[VMIN] = 1;
t->c_cc[VTIME] = 0;
}
/**
* @brief Sets the input and output baud rates in a termios structure.
* @param tio Pointer to a struct termios where the input and output baud rates will be set.
* @param speed Baud rate to be set for both input and output.
* @return Upon successful completion, returns 0; otherwise, returns -1 and sets errno to indicate the error.
*
* @note This function is a wrapper for the cfsetospeed() function.
* It sets both the input and output baud rates in the termios structure pointed to by tio to the specified speed.
* The function returns the result of cfsetospeed(), which sets the baud rate for both input and output.
*/
int cfsetspeed(struct termios *tio, speed_t speed)
{
return cfsetospeed(tio, speed);
}
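/*
 * Usage sketch (illustrative only): putting a serial device into raw mode
 * at 115200 baud with the helpers above. "/dev/uart2" is a placeholder name
 * and open() is assumed to come from <fcntl.h>.
 *
 *   struct termios tio;
 *   int fd = open("/dev/uart2", O_RDWR);
 *
 *   if (fd >= 0 && tcgetattr(fd, &tio) == 0)
 *   {
 *       cfmakeraw(&tio);                  // no line editing, echo or signals
 *       cfsetspeed(&tio, B115200);        // input and output baud rate
 *       tcsetattr(fd, TCSAFLUSH, &tio);   // apply after pending output drains
 *   }
 */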

View File

@@ -0,0 +1,248 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/08/30 Bernard The first version
* 2021/12/10 linzhenxing put tty system
*/
#ifndef __TERMIOS_H__
#define __TERMIOS_H__
#include <sys/types.h>
#include <sys/ioctl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 32
struct termios {
tcflag_t c_iflag;
tcflag_t c_oflag;
tcflag_t c_cflag;
tcflag_t c_lflag;
cc_t c_line;
cc_t c_cc[NCCS];
speed_t __c_ispeed;
speed_t __c_ospeed;
};
#ifndef NCC
#define NCC 8
struct termio
{
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
#endif
/* c_cc characters */
#define VINTR 0
#define VQUIT 1
#define VERASE 2
#define VKILL 3
#define VEOF 4
#define VTIME 5
#define VMIN 6
#define VSWTC 7
#define VSTART 8
#define VSTOP 9
#define VSUSP 10
#define VEOL 11
#define VREPRINT 12
#define VDISCARD 13
#define VWERASE 14
#define VLNEXT 15
#define VEOL2 16
/* c_iflag bits */
#define IGNBRK 0000001
#define BRKINT 0000002
#define IGNPAR 0000004
#define PARMRK 0000010
#define INPCK 0000020
#define ISTRIP 0000040
#define INLCR 0000100
#define IGNCR 0000200
#define ICRNL 0000400
#define IUCLC 0001000
#define IXON 0002000
#define IXANY 0004000
#define IXOFF 0010000
#define IMAXBEL 0020000
#define IUTF8 0040000
/* c_oflag bits */
#define OPOST 0000001
#define OLCUC 0000002
#define ONLCR 0000004
#define OCRNL 0000010
#define ONOCR 0000020
#define ONLRET 0000040
#define OFILL 00000100
#define OFDEL 00000200
#define NLDLY 00001400
#define NL0 00000000
#define NL1 00000400
#define NL2 00001000
#define NL3 00001400
#define TABDLY 00006000
#define TAB0 00000000
#define TAB1 00002000
#define TAB2 00004000
#define TAB3 00006000
#define CRDLY 00030000
#define KCR0 00000000
#define KCR1 00010000
#define KCR2 00020000
#define KCR3 00030000
#define FFDLY 00040000
#define FF0 00000000
#define FF1 00040000
#define BSDLY 00100000
#define BS0 00000000
#define BS1 00100000
#define VTDLY 00200000
#define VT0 00000000
#define VT1 00200000
#define XTABS 01000000
#define B0 0000000
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
#define B460800 0010004
#define B500000 0010005
#define B576000 0010006
#define B921600 0010007
#define B1000000 0010010
#define B1152000 0010011
#define B1500000 0010012
#define B2000000 0010013
#define B2500000 0010014
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define CSIZE 0000060
#define CS5 0000000
#define CS6 0000020
#define CS7 0000040
#define CS8 0000060
#define CSTOPB 0000100
#define CREAD 0000200
#define PARENB 0000400
#define PARODD 0001000
#define HUPCL 0002000
#define CLOCAL 0004000
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
#define XCASE 0000004
#define ECHO 0000010
#define ECHOE 0000020
#define ECHOK 0000040
#define ECHONL 0000100
#define NOFLSH 0000200
#define TOSTOP 0000400
#define ECHOCTL 0001000
#define ECHOPRT 0002000
#define ECHOKE 0004000
#define FLUSHO 0010000
#define PENDIN 0040000
#define IEXTEN 0100000
#define EXTPROC 0200000
#define TCOOFF 0
#define TCOON 1
#define TCIOFF 2
#define TCION 3
#define TCIFLUSH 0
#define TCOFLUSH 1
#define TCIOFLUSH 2
#define TCSANOW 0
#define TCSADRAIN 1
#define TCSAFLUSH 2
#define EXTA 0000016
#define EXTB 0000017
#define CBAUD 0010017
#define CBAUDEX 0010000
#define CIBAUD 002003600000
#define CMSPAR 010000000000
#define CRTSCTS 020000000000
#define XCASE 0000004
#define ECHOCTL 0001000
#define ECHOPRT 0002000
#define ECHOKE 0004000
#define FLUSHO 0010000
#define PENDIN 0040000
#define EXTPROC 0200000
/* intr=^C quit=^| erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
reprint=^R discard=^U werase=^W lnext=^V
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
speed_t cfgetospeed (const struct termios *);
speed_t cfgetispeed (const struct termios *);
int cfsetospeed (struct termios *, speed_t);
int cfsetispeed (struct termios *, speed_t);
int tcgetattr (int, struct termios *);
int tcsetattr (int, int, const struct termios *);
int tcsendbreak (int, int);
int tcdrain (int);
int tcflush (int, int);
int tcflow (int, int);
pid_t tcgetsid (int);
void cfmakeraw(struct termios *);
int cfsetspeed(struct termios *, speed_t);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,14 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend('RT_USING_DFS'):
src += ['timerfd.c']
group = DefineGroup('POSIX', src, depend = ['RT_USING_POSIX_TIMERFD'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,562 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-20 zmq810150896 first version
*/
#include <rtthread.h>
#include <dfs_file.h>
#include <stdint.h>
#include <poll.h>
#include <sys/timerfd.h>
#define DBG_TAG "TIMERFD"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#define INIT_PERIODIC 0
#define OPEN_PERIODIC 1
#define ENTER_PERIODIC 2
#define SEC_TO_MSEC 1000
#define MSEC_TO_NSEC 1000000
#define SEC_TO_NSEC 1000000000
#define TIME_INT32_MAX 0x7FFFFFFF
#define TIMERFD_MUTEX_NAME "TIMERFD"
#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
struct rt_timerfd
{
rt_wqueue_t timerfd_queue;
struct itimerspec ittimer;
rt_timer_t timer;
struct rt_mutex lock;
struct timespec pre_time;
rt_atomic_t timeout_num;
struct rt_wqueue_node wqn;
rt_atomic_t ticks;
int clockid;
int isperiodic;
int tick_out;
};
static int timerfd_close(struct dfs_file *file);
static int timerfd_poll(struct dfs_file *file, struct rt_pollreq *req);
#ifndef RT_USING_DFS_V2
static ssize_t timerfd_read(struct dfs_file *file, void *buf, size_t count);
#else
static ssize_t timerfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
#endif
static const struct dfs_file_ops timerfd_fops =
{
.close = timerfd_close,
.poll = timerfd_poll,
.read = timerfd_read,
};
static int timerfd_close(struct dfs_file *file)
{
struct rt_timerfd *tfd;
if (file->vnode->ref_count != 1)
return 0;
tfd = file->vnode->data;
if (tfd)
{
if (tfd->timer != RT_NULL)
{
rt_timer_stop(tfd->timer);
rt_timer_delete(tfd->timer);
tfd->timer = RT_NULL;
}
if (tfd->wqn.wqueue)
{
rt_wqueue_remove(&tfd->wqn);
}
rt_mutex_detach(&tfd->lock);
rt_free(tfd);
}
return 0;
}
static int timerfd_poll(struct dfs_file *file, struct rt_pollreq *req)
{
struct rt_timerfd *tfd;
int events = 0;
tfd = file->vnode->data;
rt_mutex_take(&tfd->lock, RT_WAITING_FOREVER);
rt_poll_add(&tfd->timerfd_queue, req);
rt_mutex_release(&tfd->lock);
if (rt_atomic_load(&(tfd->ticks)) > 0)
{
events |= POLLIN;
}
return events;
}
#ifndef RT_USING_DFS_V2
static ssize_t timerfd_read(struct dfs_file *file, void *buf, size_t count)
#else
static ssize_t timerfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
#endif
{
struct rt_timerfd *tfd;
rt_uint64_t *buffer;
int ret = 0;
buffer = (rt_uint64_t *)buf;
    if (sizeof(rt_uint64_t) > count) /* a timerfd read needs room for the 8-byte expiration count */
{
rt_set_errno(EINVAL);
return -1;
}
tfd = file->vnode->data;
if (!tfd)
{
rt_set_errno(EINVAL);
return -1;
}
if ((rt_atomic_load(&(tfd->ticks)) == 0) && (file->flags & O_NONBLOCK))
{
rt_set_errno(EAGAIN);
return -EAGAIN;
}
else
{
if (rt_atomic_load(&(tfd->ticks)) == 0)
{
tfd->wqn.polling_thread = rt_thread_self();
if (tfd->wqn.wqueue)
{
rt_wqueue_remove(&tfd->wqn);
}
rt_wqueue_add(&tfd->timerfd_queue, &tfd->wqn);
ret = rt_thread_suspend_with_flag(tfd->wqn.polling_thread, RT_INTERRUPTIBLE);
if (ret == RT_EOK)
{
rt_schedule();
}
else
{
return ret;
}
}
(*buffer) = rt_atomic_load(&(tfd->timeout_num));
rt_atomic_store(&(tfd->timeout_num), 0);
rt_atomic_store(&(tfd->ticks), 0);
}
    return sizeof(rt_uint64_t); /* a successful timerfd read always transfers 8 bytes */
}
static int timerfd_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
return 0;
}
static int timerfd_do_create(int clockid, int flags)
{
struct rt_timerfd *tfd = RT_NULL;
struct dfs_file *df;
rt_err_t ret = -1;
int fd = -1;
if ((flags & ~TFD_SHARED_FCNTL_FLAGS) ||
(clockid != CLOCK_MONOTONIC &&
clockid != CLOCK_REALTIME &&
clockid != CLOCK_REALTIME_ALARM &&
clockid != CLOCK_BOOTTIME &&
clockid != CLOCK_BOOTTIME_ALARM))
{
rt_set_errno(EINVAL);
return -1;
}
if ((clockid == CLOCK_REALTIME_ALARM ||
clockid == CLOCK_BOOTTIME_ALARM))
{
rt_set_errno(EPERM);
return -1;
}
fd = fd_new();
if (fd < 0)
{
rt_set_errno(EINVAL);
return -1;
}
ret = fd;
df = fd_get(fd);
if (df)
{
df->flags |= flags;
tfd = (struct rt_timerfd *)rt_calloc(1, sizeof(struct rt_timerfd));
if (tfd)
{
rt_mutex_init(&tfd->lock, TIMERFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
rt_wqueue_init(&tfd->timerfd_queue);
tfd->isperiodic = INIT_PERIODIC;
tfd->ticks = 0;
tfd->timeout_num = 0;
tfd->tick_out = 0;
tfd->clockid = clockid;
tfd->timer = RT_NULL;
tfd->pre_time.tv_sec = 0;
tfd->pre_time.tv_nsec = 0;
tfd->wqn.polling_thread = rt_thread_self();
rt_list_init(&(tfd->wqn.list));
tfd->wqn.wakeup = timerfd_wqueue_callback;
df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
if (df->vnode)
{
dfs_vnode_init(df->vnode, FT_REGULAR, &timerfd_fops);
df->vnode->data = tfd;
#ifdef RT_USING_DFS_V2
df->fops = &timerfd_fops;
#endif
}
else
{
rt_free(tfd);
fd_release(fd);
rt_set_errno(ENOMEM);
ret = -1;
}
}
else
{
fd_release(fd);
rt_set_errno(ENOMEM);
ret = -1;
}
}
else
{
fd_release(fd);
ret = -1;
}
return ret;
}
static int get_current_time(struct rt_timerfd *tfd, struct timespec *time)
{
int ret = 0;
struct timespec *cur_time = RT_NULL;
if (time == RT_NULL)
{
cur_time = &tfd->pre_time;
}
else
{
cur_time = time;
}
if (tfd->clockid >= 0)
{
ret = clock_gettime(tfd->clockid, cur_time);
}
else
{
ret = clock_gettime(CLOCK_MONOTONIC, cur_time);
}
return ret;
}
static void timerfd_timeout(void *parameter)
{
struct rt_timerfd *tfd = RT_NULL;
tfd = (struct rt_timerfd *)parameter;
if (tfd == RT_NULL)
{
return ;
}
rt_wqueue_wakeup(&tfd->timerfd_queue, (void *)POLLIN);
rt_atomic_store(&(tfd->ticks), 1);
rt_atomic_add(&(tfd->timeout_num), 1);
rt_mutex_take(&tfd->lock, RT_WAITING_FOREVER);
get_current_time(tfd, RT_NULL);
if (tfd->isperiodic == OPEN_PERIODIC)
{
if (tfd->timer)
{
rt_timer_stop(tfd->timer);
rt_timer_delete(tfd->timer);
tfd->timer = RT_NULL;
}
tfd->isperiodic = ENTER_PERIODIC;
tfd->timer = rt_timer_create(TIMERFD_MUTEX_NAME, timerfd_timeout,
tfd, tfd->tick_out,
RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
if (tfd->timer == RT_NULL)
{
LOG_E("rt_timer_create fail \n");
rt_mutex_release(&tfd->lock);
return ;
}
rt_timer_start(tfd->timer);
}
rt_mutex_release(&tfd->lock);
}
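/* Normalize a (sec, nsec) pair after a subtraction: borrow one second when
 * the nanosecond part went negative, and clamp the result at zero. */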
static void timerfd_time_operation(time_t *sec, long *nsec)
{
if (*nsec < 0)
{
if (*sec > 0)
{
*sec -= 1;
*nsec = 1 * SEC_TO_NSEC + *nsec;
}
}
if (*sec < 0 || *nsec < 0)
{
*sec = 0;
*nsec = 0;
}
}
static int timerfd_do_settime(int fd, int flags, const struct itimerspec *new, struct itimerspec *old)
{
int ret = 0;
struct rt_timerfd *tfd;
struct dfs_file *df;
struct timespec current_time;
int tick_out;
rt_int64_t value_msec;
rt_int64_t interval_msec;
rt_int64_t cur_time = 0;
if (fd < 0)
{
rt_set_errno(EINVAL);
return -EINVAL;
}
df = fd_get(fd);
if (!df)
return -EINVAL;
tfd = df->vnode->data;
rt_atomic_store(&(tfd->ticks), 0);
rt_atomic_store(&(tfd->timeout_num), 0);
rt_mutex_take(&tfd->lock, RT_WAITING_FOREVER);
tfd->isperiodic = INIT_PERIODIC;
if (old)
{
old->it_interval.tv_nsec = tfd->ittimer.it_interval.tv_nsec;
old->it_interval.tv_sec = tfd->ittimer.it_interval.tv_sec;
old->it_value.tv_nsec = tfd->ittimer.it_value.tv_nsec;
old->it_value.tv_sec = tfd->ittimer.it_value.tv_sec;
}
if (new)
{
if (tfd->timer != RT_NULL)
{
rt_timer_stop(tfd->timer);
rt_timer_delete(tfd->timer);
tfd->timer = RT_NULL;
}
if (new->it_value.tv_nsec == 0 && new->it_value.tv_sec == 0)
{
rt_mutex_release(&tfd->lock);
return 0;
}
value_msec = (new->it_value.tv_nsec / MSEC_TO_NSEC) + (new->it_value.tv_sec * SEC_TO_MSEC);
interval_msec = (new->it_interval.tv_nsec / MSEC_TO_NSEC) + (new->it_interval.tv_sec * SEC_TO_MSEC);
current_time.tv_nsec = 0;
current_time.tv_sec = 0;
if (flags == TFD_TIMER_ABSTIME)
{
ret = get_current_time(tfd, &current_time);
if (ret < 0)
{
rt_mutex_release(&tfd->lock);
return ret;
}
cur_time = current_time.tv_sec * SEC_TO_MSEC + (current_time.tv_nsec / MSEC_TO_NSEC);
value_msec = value_msec - cur_time;
}
tfd->ittimer.it_interval.tv_nsec = new->it_interval.tv_nsec;
tfd->ittimer.it_interval.tv_sec = new->it_interval.tv_sec;
tfd->ittimer.it_value.tv_sec = new->it_value.tv_sec - current_time.tv_sec;
tfd->ittimer.it_value.tv_nsec = new->it_value.tv_nsec - current_time.tv_nsec;
timerfd_time_operation(&tfd->ittimer.it_value.tv_sec, &tfd->ittimer.it_value.tv_nsec);
if ((interval_msec > 0) && (interval_msec <= TIME_INT32_MAX))
{
tfd->tick_out = rt_tick_from_millisecond(interval_msec);
if (tfd->tick_out < 0)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tfd->isperiodic = OPEN_PERIODIC;
}
get_current_time(tfd, RT_NULL);
if (value_msec > 0)
{
if (value_msec > TIME_INT32_MAX)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tick_out = rt_tick_from_millisecond(value_msec);
if (tick_out < 0)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tfd->timer = rt_timer_create(TIMERFD_MUTEX_NAME, timerfd_timeout,
tfd, tick_out,
RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
if (tfd->timer == RT_NULL)
{
LOG_E("rt_timer_create fail \n");
rt_mutex_release(&tfd->lock);
return -ENOMEM;
}
rt_timer_start(tfd->timer);
}
else
{
timerfd_timeout(tfd);
}
}
else
{
rt_set_errno(EINVAL);
ret = -1;
}
rt_mutex_release(&tfd->lock);
return ret;
}
static int timerfd_do_gettime(int fd, struct itimerspec *cur)
{
struct rt_timerfd *tfd;
struct dfs_file *df = RT_NULL;
struct timespec cur_time;
rt_int64_t tv_sec = 0;
rt_int64_t tv_nsec = 0;
df = fd_get(fd);
if (df == RT_NULL)
{
rt_set_errno(EINVAL);
return -1;
}
tfd = df->vnode->data;
get_current_time(tfd, &cur_time);
rt_mutex_take(&tfd->lock, RT_WAITING_FOREVER);
tv_sec = cur_time.tv_sec - tfd->pre_time.tv_sec;
tv_nsec = cur_time.tv_nsec - tfd->pre_time.tv_nsec;
timerfd_time_operation(&tv_sec, &tv_nsec);
cur->it_interval.tv_nsec = tfd->ittimer.it_interval.tv_nsec;
cur->it_interval.tv_sec = tfd->ittimer.it_interval.tv_sec;
if (tfd->isperiodic == ENTER_PERIODIC)
{
cur->it_value.tv_nsec = tfd->ittimer.it_interval.tv_nsec - tv_nsec;
cur->it_value.tv_sec = tfd->ittimer.it_interval.tv_sec - tv_sec;
timerfd_time_operation(&cur->it_value.tv_sec, &cur->it_value.tv_nsec);
}
else
{
if (rt_atomic_load(&(tfd->timeout_num)) == 1)
{
cur->it_value.tv_nsec = 0;
cur->it_value.tv_sec = 0;
}
else
{
cur->it_value.tv_nsec = tfd->ittimer.it_value.tv_nsec - tv_nsec;
cur->it_value.tv_sec = tfd->ittimer.it_value.tv_sec - tv_sec;
timerfd_time_operation(&cur->it_value.tv_sec, &cur->it_value.tv_nsec);
}
}
rt_mutex_release(&tfd->lock);
return 0;
}
int timerfd_create(int clockid, int flags)
{
return timerfd_do_create(clockid, flags);
}
int timerfd_settime(int fd, int flags, const struct itimerspec *new, struct itimerspec *old)
{
return timerfd_do_settime(fd, flags, new, old);
}
int timerfd_gettime(int fd, struct itimerspec *cur)
{
return timerfd_do_gettime(fd, cur);
}
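/*
 * Usage sketch (illustrative only): a one-second periodic timer read through
 * the interface above. read() and CLOCK_MONOTONIC are assumed to be provided
 * by the enabled libc and clock support.
 *
 *   struct itimerspec ts;
 *   rt_uint64_t expirations;
 *   int fd = timerfd_create(CLOCK_MONOTONIC, 0);
 *
 *   ts.it_value.tv_sec = 1;    ts.it_value.tv_nsec = 0;      // first expiry
 *   ts.it_interval.tv_sec = 1; ts.it_interval.tv_nsec = 0;   // then every second
 *
 *   if (fd >= 0 && timerfd_settime(fd, 0, &ts, RT_NULL) == 0)
 *   {
 *       // blocks until the timer fires, then reports how many expirations occurred
 *       if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
 *       {
 *           rt_kprintf("timer expired %d time(s)\n", (int)expirations);
 *       }
 *   }
 */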