commit 46d9ee7795
2024-08-05 20:57:09 +08:00
3020 changed files with 1725767 additions and 0 deletions


@@ -0,0 +1,713 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* (tty_compat.c)
* The compatibility layer that interacts with the process management core (lwp)
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "../tty_config.h"
#include "../tty_internal.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1994-1995 Søren Schmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Is the TTY already bound to this session leader's session? */
static rt_bool_t _is_already_binding(lwp_tty_t tp, rt_lwp_t p)
{
rt_bool_t rc;
rt_processgroup_t pgrp = p->pgrp;
/* lwp is already locked */
RT_ASSERT(pgrp);
/* Note: pgrp->session is constant after process group is created */
if (tp->t_session && tp->t_session == pgrp->session)
{
rc = RT_TRUE;
}
else
{
rc = RT_FALSE;
}
return rc;
}
static rt_bool_t _is_tty_or_sess_busy(lwp_tty_t tp, rt_lwp_t p)
{
rt_bool_t rc;
rt_session_t sess = p->pgrp->session;
SESS_LOCK(sess);
if (sess->ctty)
{
rc = RT_TRUE;
}
else if (tp->t_session)
{
/**
* TODO: allow the TTY to be stolen if the session leader was killed
* but its resources have not yet been collected
*/
if (tp->t_session->leader == RT_NULL)
rc = RT_FALSE;
else
rc = RT_TRUE;
}
else
{
rc = RT_FALSE;
}
SESS_UNLOCK(sess);
return rc;
}
int lwp_tty_bg_stop(struct lwp_tty *tp, struct rt_condvar *cv)
{
int error;
int revokecnt = tp->t_revokecnt;
rt_lwp_t self_lwp;
rt_thread_t header_thr;
rt_thread_t cur_thr = rt_thread_self();
int jobctl_stopped;
self_lwp = cur_thr->lwp;
RT_ASSERT(self_lwp);
jobctl_stopped = self_lwp->jobctl_stopped;
tty_lock_assert(tp, MA_OWNED | MA_NOTRECURSED);
MPASS(!tty_gone(tp));
LWP_LOCK(self_lwp);
header_thr = rt_list_entry(self_lwp->t_grp.prev, struct rt_thread, sibling);
if (!jobctl_stopped && header_thr == cur_thr &&
cur_thr->sibling.prev == &self_lwp->t_grp)
{
/* update lwp status */
jobctl_stopped = self_lwp->jobctl_stopped = RT_TRUE;
}
LWP_UNLOCK(self_lwp);
error = cv_wait(cv, tp->t_mtx);
if (jobctl_stopped)
{
self_lwp->jobctl_stopped = RT_FALSE;
}
/* Bail out when the device slipped away. */
if (tty_gone(tp))
return -ENXIO;
/* Restart the system call when we may have been revoked. */
if (tp->t_revokecnt != revokecnt)
return -ERESTART;
return error;
}
/* process management */
int lwp_tty_set_ctrl_proc(lwp_tty_t tp, rt_thread_t td)
{
int rc = -1;
struct rt_lwp *p = td->lwp;
tty_unlock(tp);
LWP_LOCK(p);
tty_lock(tp);
if (is_sess_leader(p))
{
if (_is_already_binding(tp, p))
{
rc = 0;
}
else if (_is_tty_or_sess_busy(tp, p))
{
rc = -EPERM;
}
else
{
/**
* Binding controlling process
* note: p->pgrp is protected by lwp lock;
* pgrp->session is always constant.
*/
tp->t_session = p->pgrp->session;
tp->t_session->ctty = tp;
tp->t_sessioncnt++;
/* Assign foreground process group */
tp->t_pgrp = p->pgrp;
p->term_ctrlterm = RT_TRUE;
LOG_D("%s(sid=%d)", __func__, tp->t_session->sid);
rc = 0;
}
}
else
{
rc = -EPERM;
}
LWP_UNLOCK(p);
return rc;
}
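/*
 * Illustration only (not part of this port): user space typically reaches a
 * controlling-terminal binding like the one above through the standard
 * POSIX sequence below -- become a session leader, open the terminal, then
 * issue TIOCSCTTY.  A minimal, hedged sketch; the device path is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int acquire_ctty_sketch(const char *path /* e.g. "/dev/pts/0" */)
{
    int fd;

    if (setsid() < 0)           /* become a session leader, drop old ctty */
        return -1;
    fd = open(path, O_RDWR);    /* open the terminal we want to control */
    if (fd < 0)
        return -1;
    if (ioctl(fd, TIOCSCTTY, 0) < 0) /* bind it as the controlling tty */
    {
        close(fd);
        return -1;
    }
    return fd;
}
#endif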
int lwp_tty_assign_foreground(lwp_tty_t tp, rt_thread_t td, int pgid)
{
struct rt_processgroup *pg;
rt_lwp_t cur_lwp = td->lwp;
tty_unlock(tp);
pg = lwp_pgrp_find_and_inc_ref(pgid);
if (pg == NULL || cur_lwp == NULL)
{
tty_lock(tp);
return -EPERM;
}
else
{
PGRP_LOCK(pg);
if (pg->sid != cur_lwp->sid)
{
PGRP_UNLOCK(pg);
lwp_pgrp_dec_ref(pg);
LOG_D("%s: NoPerm current process (pid=%d, pgid=%d, sid=%d), "
"tagget group (pgid=%d, sid=%d)", __func__,
cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
tty_lock(tp);
return -EPERM;
}
}
tty_lock(tp);
/**
* Determine if this TTY is the controlling TTY after
* relocking the TTY.
*/
if (!tty_is_ctty(tp, td->lwp))
{
PGRP_UNLOCK(pg);
LOG_D("%s: NoCTTY current process (pid=%d, pgid=%d, sid=%d), "
"tagget group (pgid=%d, sid=%d)", __func__,
cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
return -ENOTTY;
}
tp->t_pgrp = pg;
PGRP_UNLOCK(pg);
lwp_pgrp_dec_ref(pg);
/* Wake up the background process groups. */
cv_broadcast(&tp->t_bgwait);
LOG_D("%s: Foreground group %p (pgid=%d)", __func__, tp->t_pgrp,
tp->t_pgrp ? tp->t_pgrp->pgid : -1);
return 0;
}
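/*
 * Illustration only: a job-control shell exercises the path above when it
 * moves a job to the foreground.  The libc helper tcsetpgrp() typically
 * issues TIOCSPGRP underneath; a minimal sketch with made-up names.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void give_terminal_to_sketch(int tty_fd, pid_t job_pgid)
{
    /* Ignore SIGTTOU while changing the foreground group, otherwise a
     * background shell would stop itself on the ioctl. */
    void (*saved)(int) = signal(SIGTTOU, SIG_IGN);

    tcsetpgrp(tty_fd, job_pgid);    /* assign the foreground process group */
    signal(SIGTTOU, saved);
}
#endif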
/**
* Signalling processes.
*/
void lwp_tty_signal_sessleader(struct lwp_tty *tp, int sig)
{
struct rt_lwp *p;
struct rt_session *s;
tty_assert_locked(tp);
MPASS(sig >= 1 && sig < _LWP_NSIG);
/* Make signals start output again. */
tp->t_flags &= ~TF_STOPPED;
tp->t_termios.c_lflag &= ~FLUSHO;
/**
* Load s.leader exactly once to avoid race where s.leader is
* set to NULL by a concurrent invocation of killjobc() by the
* session leader. Note that we are not holding t_session's
* lock for the read.
*/
if ((s = tp->t_session) != NULL &&
(p = (void *)rt_atomic_load((rt_atomic_t *)&s->leader)) != NULL)
{
lwp_signal_kill(p, sig, SI_KERNEL, 0);
}
}
void lwp_tty_signal_pgrp(struct lwp_tty *tp, int sig)
{
tty_assert_locked(tp);
MPASS(sig >= 1 && sig < _LWP_NSIG);
/* Make signals start output again. */
tp->t_flags &= ~TF_STOPPED;
tp->t_termios.c_lflag &= ~FLUSHO;
#ifdef USING_BSD_SIGINFO
if (sig == SIGINFO && !(tp->t_termios.c_lflag & NOKERNINFO))
tty_info(tp);
#endif /* USING_BSD_SIGINFO */
if (tp->t_pgrp != NULL)
{
PGRP_LOCK(tp->t_pgrp);
lwp_pgrp_signal_kill(tp->t_pgrp, sig, SI_KERNEL, 0);
PGRP_UNLOCK(tp->t_pgrp);
}
}
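/*
 * Illustration only: when the line discipline maps an ISIG character such
 * as ^C to SIGINT, the whole foreground process group receives it through
 * the function above.  A member of that group observes it like any other
 * signal; minimal user-space sketch, names are made up.
 */
#if 0
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int signo)
{
    (void)signo;
    got_sigint = 1;
}

static void wait_for_interrupt_sketch(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = on_sigint;
    sigaction(SIGINT, &sa, NULL);
    while (!got_sigint)
        pause();    /* ^C on the controlling terminal ends the wait */
}
#endif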
/* bsd_ttydev_methods.d_ioctl */
rt_inline int _copy_to_user(void *to, void *from, size_t n)
{
return lwp_put_to_user(to, from, n) == n ? 0 : -EFAULT;
}
rt_inline int _copy_from_user(void *to, void *from, size_t n)
{
return lwp_get_from_user(to, from, n) == n ? 0 : -EFAULT;
}
static void termios_to_termio(struct termios *tios, struct termio *tio)
{
memset(tio, 0, sizeof(*tio));
tio->c_iflag = tios->c_iflag;
tio->c_oflag = tios->c_oflag;
tio->c_cflag = tios->c_cflag;
tio->c_lflag = tios->c_lflag;
tio->c_line = tios->c_line;
memcpy(tio->c_cc, tios->c_cc, NCC);
}
static void termio_to_termios(struct termio *tio, struct termios *tios)
{
int i;
tios->c_iflag = tio->c_iflag;
tios->c_oflag = tio->c_oflag;
tios->c_cflag = tio->c_cflag;
tios->c_lflag = tio->c_lflag;
for (i = NCC; i < NCCS; i++)
tios->c_cc[i] = _POSIX_VDISABLE;
memcpy(tios->c_cc, tio->c_cc, NCC);
}
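/*
 * Illustration only: the adapter below maps the Linux termios ioctls
 * (TCGETS/TCSETS*) and the legacy termio ioctls (TCGETA/TCSETA*) onto the
 * BSD TIOCGETA/TIOCSETA* family.  In user space this is normally hidden
 * behind tcgetattr()/tcsetattr(); a minimal "raw mode" sketch:
 */
#if 0
#include <termios.h>
#include <unistd.h>

static int enter_raw_mode_sketch(int fd, struct termios *saved)
{
    struct termios raw;

    if (tcgetattr(fd, saved) < 0)               /* typically -> TCGETS */
        return -1;
    raw = *saved;
    raw.c_lflag &= ~(ICANON | ECHO | ISIG);     /* no line editing/echo/signals */
    raw.c_cc[VMIN] = 1;                         /* read() returns per byte */
    raw.c_cc[VTIME] = 0;
    return tcsetattr(fd, TCSANOW, &raw);        /* typically -> TCSETS */
}
#endif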
#define IOCTL(cmd, data, fflags, td) \
bsd_ttydev_methods.d_ioctl(tp, cmd, data, fflags, td)
int lwp_tty_ioctl_adapter(lwp_tty_t tp, int cmd, int oflags, void *args, rt_thread_t td)
{
long fflags = FFLAGS(oflags);
struct termios tios;
struct termio tio;
int error;
LOG_D("%s(cmd=0x%x, args=%p)", __func__, cmd, args);
switch (cmd & 0xffff)
{
case TCGETS:
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
if (error)
break;
cfsetospeed(&tios, tios.__c_ispeed);
error = _copy_to_user(args, &tios, sizeof(tios));
break;
case TCSETS:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
tios.__c_ispeed = tios.__c_ospeed = cfgetospeed(&tios);
error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETSW:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETSF:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
break;
case TCGETA:
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
if (error)
break;
termios_to_termio(&tios, &tio);
error = _copy_to_user((void *)args, &tio, sizeof(tio));
break;
case TCSETA:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETAW:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETAF:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
break;
case TCSBRK:
if (args != 0)
{
/**
* Linux manual: SVr4, UnixWare, Solaris, and Linux treat
* tcsendbreak(fd,arg) with nonzero arg like tcdrain(fd).
*/
error = IOCTL(TIOCDRAIN, (rt_caddr_t)&tios, fflags, td);
}
else
{
/**
* Linux manual: If the terminal is using asynchronous serial
* data transmission, and arg is zero, then send a break (a
* stream of zero bits) for between 0.25 and 0.5 seconds.
*/
LOG_D("%s: ioctl TCSBRK arg 0 not implemented", __func__);
error = -ENOSYS;
}
break;
#ifdef USING_BSD_IOCTL_EXT
/* Software flow control */
case TCXONC: {
switch (args->arg)
{
case TCOOFF:
args->cmd = TIOCSTOP;
break;
case TCOON:
args->cmd = TIOCSTART;
break;
case TCIOFF:
case TCION: {
int c;
struct write_args wr;
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags,
td);
if (error)
break;
fdrop(fp, td);
c = (args->arg == TCIOFF) ? VSTOP : VSTART;
c = tios.c_cc[c];
if (c != _POSIX_VDISABLE)
{
wr.fd = args->fd;
wr.buf = &c;
wr.nbyte = sizeof(c);
return (sys_write(td, &wr));
}
else
return 0;
}
default:
fdrop(fp, td);
return -EINVAL;
}
args->arg = 0;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
}
#endif /* USING_BSD_IOCTL_EXT */
case TCFLSH: {
int val;
error = 0;
switch ((rt_base_t)args)
{
case TCIFLUSH:
val = FREAD;
break;
case TCOFLUSH:
val = FWRITE;
break;
case TCIOFLUSH:
val = FREAD | FWRITE;
break;
default:
error = -EINVAL;
break;
}
if (!error)
error = (IOCTL(TIOCFLUSH, (rt_caddr_t)&val, fflags, td));
break;
}
#ifdef USING_BSD_IOCTL_EXT
case TIOCEXCL:
args->cmd = TIOCEXCL;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCNXCL:
args->cmd = TIOCNXCL;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
#endif /* USING_BSD_IOCTL_EXT */
/* Controlling terminal */
case TIOCSCTTY:
case TIOCNOTTY:
/* Process group and session ID */
case TIOCGPGRP:
case TIOCSPGRP:
case TIOCGSID:
/* TIOCOUTQ */
/* TIOCSTI */
case TIOCGWINSZ:
case TIOCSWINSZ:
error = IOCTL(cmd, (rt_caddr_t)args, fflags, td);
break;
#ifdef USING_BSD_IOCTL_EXT
case TIOCMGET:
args->cmd = TIOCMGET;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMBIS:
args->cmd = TIOCMBIS;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMBIC:
args->cmd = TIOCMBIC;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMSET:
args->cmd = TIOCMSET;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
#endif /* USING_BSD_IOCTL_EXT */
/* TIOCGSOFTCAR */
/* TIOCSSOFTCAR */
case FIONREAD: /* TIOCINQ */
error = (IOCTL(FIONREAD, args, fflags, td));
break;
#ifdef USING_BSD_IOCTL_EXT
/* TIOCLINUX */
case TIOCCONS:
args->cmd = TIOCCONS;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCGSERIAL: {
struct linux_serial_struct lss;
bzero(&lss, sizeof(lss));
lss.type = PORT_16550A;
lss.flags = 0;
lss.close_delay = 0;
error = copyout(&lss, (void *)args->arg, sizeof(lss));
break;
}
case TIOCSSERIAL: {
struct linux_serial_struct lss;
error = copyin((void *)args->arg, &lss, sizeof(lss));
if (error)
break;
/* XXX - It really helps to have an implementation that
* does nothing. NOT!
*/
error = 0;
break;
}
case TIOCPKT:
args->cmd = TIOCPKT;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIONBIO:
args->cmd = FIONBIO;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCSETD: {
int line;
switch (args->arg)
{
case N_TTY:
line = TTYDISC;
break;
case N_SLIP:
line = SLIPDISC;
break;
case N_PPP:
line = PPPDISC;
break;
default:
fdrop(fp, td);
return -EINVAL;
}
error = (ioctl_emit(TIOCSETD, (rt_caddr_t)&line, fflags, td));
break;
}
case TIOCGETD: {
int linux_line;
int bsd_line = TTYDISC;
error =
ioctl_emit(TIOCGETD, (rt_caddr_t)&bsd_line, fflags, td);
if (error)
break;
switch (bsd_line)
{
case TTYDISC:
linux_line = N_TTY;
break;
case SLIPDISC:
linux_line = N_SLIP;
break;
case PPPDISC:
linux_line = N_PPP;
break;
default:
fdrop(fp, td);
return -EINVAL;
}
error = (copyout(&linux_line, (void *)args->arg, sizeof(int)));
break;
}
/* TCSBRKP */
/* TIOCTTYGSTRUCT */
case FIONCLEX:
args->cmd = FIONCLEX;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIOCLEX:
args->cmd = FIOCLEX;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIOASYNC:
args->cmd = FIOASYNC;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
/* TIOCSERCONFIG */
/* TIOCSERGWILD */
/* TIOCSERSWILD */
/* TIOCGLCKTRMIOS */
/* TIOCSLCKTRMIOS */
case TIOCSBRK:
args->cmd = TIOCSBRK;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCCBRK:
args->cmd = TIOCCBRK;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCGPTN: {
int nb;
error = ioctl_emit(TIOCGPTN, (rt_caddr_t)&nb, fflags, td);
if (!error)
error = copyout(&nb, (void *)args->arg, sizeof(int));
break;
}
case TIOCGPTPEER:
linux_msg(td, "unsupported ioctl TIOCGPTPEER");
error = -ENOIOCTL;
break;
case TIOCSPTLCK:
/*
* Our unlockpt() does nothing. Check that fd refers
* to a pseudo-terminal master device.
*/
args->cmd = TIOCPTMASTER;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
#endif /* USING_BSD_IOCTL_EXT */
/**
* These are handled by the current devfs implementation, and we don't
* want to log them
*/
case F_DUPFD:
case F_DUPFD_CLOEXEC:
case F_GETFD:
case F_SETFD:
case F_GETFL:
case F_SETFL:
/* fall back to fs */
error = -ENOIOCTL;
break;
default:
LOG_I("%s: unhandle commands 0x%x", __func__, cmd);
error = -ENOSYS;
break;
}
return (error);
}
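/*
 * Illustration only: the TCFLSH and TCSBRK cases above are what the libc
 * helpers tcflush() and tcdrain() typically boil down to on a Linux-style
 * ABI.  Minimal sketch:
 */
#if 0
#include <termios.h>

static void discard_pending_input_sketch(int fd)
{
    tcflush(fd, TCIFLUSH);   /* typically TCFLSH(TCIFLUSH) -> TIOCFLUSH(FREAD) */
}

static void wait_until_sent_sketch(int fd)
{
    tcdrain(fd);             /* typically TCSBRK with a nonzero arg -> TIOCDRAIN */
}
#endif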


@@ -0,0 +1,507 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY input queue buffering.
*
* Unlike the output queue, the input queue has more features that are
* needed to properly implement various features offered by the TTY
* interface:
*
* - Data can be removed from the tail of the queue, which is used to
* implement backspace.
* - Once in a while, input has to be `canonicalized'. When ICANON is
* turned on, this will be done after a CR has been inserted.
* Otherwise, it should be done after any character has been inserted.
* - The input queue can store one bit per byte, called the quoting bit.
* This bit is used by TTYDISC to make backspace work on quoted
* characters.
*
* In most cases, there is probably less input than output, so unlike
* the outq, we'll stick to 128 byte blocks here.
*/
static int ttyinq_flush_secure = 1;
#define TTYINQ_QUOTESIZE (TTYINQ_DATASIZE / BMSIZE)
#define BMSIZE 32
#define GETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] & (1 << ((boff) % BMSIZE)))
#define SETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] |= (1 << ((boff) % BMSIZE)))
#define CLRBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] &= ~(1 << ((boff) % BMSIZE)))
struct ttyinq_block
{
struct ttyinq_block *tib_prev;
struct ttyinq_block *tib_next;
uint32_t tib_quotes[TTYINQ_QUOTESIZE];
char tib_data[TTYINQ_DATASIZE];
};
static uma_zone_t ttyinq_zone;
#define TTYINQ_INSERT_TAIL(ti, tib) \
do \
{ \
if (ti->ti_end == 0) \
{ \
tib->tib_prev = NULL; \
tib->tib_next = ti->ti_firstblock; \
ti->ti_firstblock = tib; \
} \
else \
{ \
tib->tib_prev = ti->ti_lastblock; \
tib->tib_next = ti->ti_lastblock->tib_next; \
ti->ti_lastblock->tib_next = tib; \
} \
if (tib->tib_next != NULL) \
tib->tib_next->tib_prev = tib; \
ti->ti_nblocks++; \
} while (0)
#define TTYINQ_REMOVE_HEAD(ti) \
do \
{ \
ti->ti_firstblock = ti->ti_firstblock->tib_next; \
if (ti->ti_firstblock != NULL) \
ti->ti_firstblock->tib_prev = NULL; \
ti->ti_nblocks--; \
} while (0)
#define TTYINQ_RECYCLE(ti, tib) \
do \
{ \
if (ti->ti_quota <= ti->ti_nblocks) \
uma_zfree(ttyinq_zone, tib); \
else \
TTYINQ_INSERT_TAIL(ti, tib); \
} while (0)
int ttyinq_setsize(struct ttyinq *ti, struct lwp_tty *tp, size_t size)
{
struct ttyinq_block *tib;
ti->ti_quota = howmany(size, TTYINQ_DATASIZE);
while (ti->ti_quota > ti->ti_nblocks)
{
/*
* List is getting bigger.
* Add new blocks to the tail of the list.
*
* We must unlock the TTY temporarily, because we need
* to allocate memory. This won't be a problem, because
* in the worst case, another thread ends up here, which
* may cause us to allocate too many blocks, but this
* will be caught by the loop below.
*/
tty_unlock(tp);
tib = uma_zalloc(ttyinq_zone, M_WAITOK);
tty_lock(tp);
if (tty_gone(tp))
{
uma_zfree(ttyinq_zone, tib);
return -ENXIO;
}
TTYINQ_INSERT_TAIL(ti, tib);
}
return 0;
}
void ttyinq_free(struct ttyinq *ti)
{
struct ttyinq_block *tib;
ttyinq_flush(ti);
ti->ti_quota = 0;
while ((tib = ti->ti_firstblock) != NULL)
{
TTYINQ_REMOVE_HEAD(ti);
uma_zfree(ttyinq_zone, tib);
}
MPASS(ti->ti_nblocks == 0);
}
int ttyinq_read_uio(struct ttyinq *ti, struct lwp_tty *tp, struct uio *uio,
size_t rlen, size_t flen)
{
MPASS(rlen <= uio->uio_resid);
while (rlen > 0)
{
int error;
struct ttyinq_block *tib;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (ti->ti_begin == ti->ti_linestart)
return 0;
tib = ti->ti_firstblock;
if (tib == NULL)
return 0;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
cbegin = ti->ti_begin;
cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen), TTYINQ_DATASIZE);
clen = cend - cbegin;
MPASS(clen >= flen);
rlen -= clen;
/*
* We can prevent buffering in some cases:
* - We need to read the block until the end.
* - We don't need to read the block until the end, but
* there is no data beyond it, which allows us to move
* the write pointer to a new block.
*/
if (cend == TTYINQ_DATASIZE || cend == ti->ti_end)
{
/*
* Fast path: zero copy. Remove the first block,
* so we can unlock the TTY temporarily.
*/
TTYINQ_REMOVE_HEAD(ti);
ti->ti_begin = 0;
/*
* Because we remove the first block, we must
* fix up the block offsets.
*/
#define CORRECT_BLOCK(t) \
do \
{ \
if (t <= TTYINQ_DATASIZE) \
t = 0; \
else \
t -= TTYINQ_DATASIZE; \
} while (0)
CORRECT_BLOCK(ti->ti_linestart);
CORRECT_BLOCK(ti->ti_reprint);
CORRECT_BLOCK(ti->ti_end);
#undef CORRECT_BLOCK
/*
* Temporary unlock and copy the data to
* userspace. We may need to flush trailing
* bytes, like EOF characters.
*/
tty_unlock(tp);
error = uiomove(tib->tib_data + cbegin, clen - flen, uio);
tty_lock(tp);
/* Block can now be readded to the list. */
TTYINQ_RECYCLE(ti, tib);
}
else
{
char ob[TTYINQ_DATASIZE - 1];
/*
* Slow path: store data in a temporary buffer.
*/
memcpy(ob, tib->tib_data + cbegin, clen - flen);
ti->ti_begin += clen;
MPASS(ti->ti_begin < TTYINQ_DATASIZE);
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(ob, clen - flen, uio);
tty_lock(tp);
}
if (error != 0)
return error;
if (tty_gone(tp))
return -ENXIO;
}
return 0;
}
rt_inline void ttyinq_set_quotes(struct ttyinq_block *tib, size_t offset,
size_t length, int value)
{
if (value)
{
/* Set the bits. */
for (; length > 0; length--, offset++) SETBIT(tib, offset);
}
else
{
/* Unset the bits. */
for (; length > 0; length--, offset++) CLRBIT(tib, offset);
}
}
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t nbytes,
int quote)
{
const char *cbuf = buf;
struct ttyinq_block *tib;
unsigned int boff;
size_t l;
while (nbytes > 0)
{
boff = ti->ti_end % TTYINQ_DATASIZE;
if (ti->ti_end == 0)
{
/* First time we're being used or drained. */
MPASS(ti->ti_begin == 0);
tib = ti->ti_firstblock;
if (tib == NULL)
{
/* Queue has no blocks. */
break;
}
ti->ti_lastblock = tib;
}
else if (boff == 0)
{
/* We reached the end of this block on last write. */
tib = ti->ti_lastblock->tib_next;
if (tib == NULL)
{
/* We've reached the watermark. */
break;
}
ti->ti_lastblock = tib;
}
else
{
tib = ti->ti_lastblock;
}
/* Don't copy more than was requested. */
l = MIN(nbytes, TTYINQ_DATASIZE - boff);
MPASS(l > 0);
memcpy(tib->tib_data + boff, cbuf, l);
/* Set the quoting bits for the proper region. */
ttyinq_set_quotes(tib, boff, l, quote);
cbuf += l;
nbytes -= l;
ti->ti_end += l;
}
return (cbuf - (const char *)buf);
}
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes,
int quote)
{
size_t ret __unused;
if (ttyinq_bytesleft(ti) < nbytes)
return -1;
/* We should always be able to write it back. */
ret = ttyinq_write(ti, buf, nbytes, quote);
MPASS(ret == nbytes);
return 0;
}
void ttyinq_canonicalize(struct ttyinq *ti)
{
ti->ti_linestart = ti->ti_reprint = ti->ti_end;
ti->ti_startblock = ti->ti_reprintblock = ti->ti_lastblock;
}
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
char *lastc)
{
struct ttyinq_block *tib = ti->ti_firstblock;
unsigned int boff = ti->ti_begin;
unsigned int bend =
MIN(MIN(TTYINQ_DATASIZE, ti->ti_linestart), ti->ti_begin + maxlen);
MPASS(maxlen > 0);
if (tib == NULL)
return 0;
while (boff < bend)
{
if (strchr(breakc, tib->tib_data[boff]) && !GETBIT(tib, boff))
{
*lastc = tib->tib_data[boff];
return (boff - ti->ti_begin + 1);
}
boff++;
}
/* Not found - just process the entire block. */
return (bend - ti->ti_begin);
}
void ttyinq_flush(struct ttyinq *ti)
{
struct ttyinq_block *tib;
ti->ti_begin = 0;
ti->ti_linestart = 0;
ti->ti_reprint = 0;
ti->ti_end = 0;
/* Zero all data in the input queue to get rid of passwords. */
if (ttyinq_flush_secure)
{
for (tib = ti->ti_firstblock; tib != NULL; tib = tib->tib_next)
memset(&tib->tib_data, 0, sizeof tib->tib_data);
}
}
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote)
{
unsigned int boff;
struct ttyinq_block *tib = ti->ti_lastblock;
if (ti->ti_linestart == ti->ti_end)
return -1;
MPASS(ti->ti_end > 0);
boff = (ti->ti_end - 1) % TTYINQ_DATASIZE;
*c = tib->tib_data[boff];
*quote = GETBIT(tib, boff);
return 0;
}
void ttyinq_unputchar(struct ttyinq *ti)
{
MPASS(ti->ti_linestart < ti->ti_end);
if (--ti->ti_end % TTYINQ_DATASIZE == 0)
{
/* Roll back to the previous block. */
ti->ti_lastblock = ti->ti_lastblock->tib_prev;
/*
* This can only fail if we are unputchar()'ing the
* first character in the queue.
*/
MPASS((ti->ti_lastblock == NULL) == (ti->ti_end == 0));
}
}
void ttyinq_reprintpos_set(struct ttyinq *ti)
{
ti->ti_reprint = ti->ti_end;
ti->ti_reprintblock = ti->ti_lastblock;
}
void ttyinq_reprintpos_reset(struct ttyinq *ti)
{
ti->ti_reprint = ti->ti_linestart;
ti->ti_reprintblock = ti->ti_startblock;
}
static void ttyinq_line_iterate(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator, void *data,
unsigned int offset, struct ttyinq_block *tib)
{
unsigned int boff;
/* Use the proper block when we're at the queue head. */
if (offset == 0)
tib = ti->ti_firstblock;
/* Iterate all characters and call the iterator function. */
for (; offset < ti->ti_end; offset++)
{
boff = offset % TTYINQ_DATASIZE;
MPASS(tib != NULL);
/* Call back the iterator function. */
iterator(data, tib->tib_data[boff], GETBIT(tib, boff));
/* Last byte iterated - go to the next block. */
if (boff == TTYINQ_DATASIZE - 1)
tib = tib->tib_next;
}
}
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data)
{
ttyinq_line_iterate(ti, iterator, data, ti->ti_linestart,
ti->ti_startblock);
}
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data)
{
ttyinq_line_iterate(ti, iterator, data, ti->ti_reprint,
ti->ti_reprintblock);
}
static int ttyinq_startup(void)
{
ttyinq_zone = uma_zcreate("ttyinq", sizeof(struct ttyinq_block), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
return 0;
}
INIT_PREV_EXPORT(ttyinq_startup);
#if 0
SYSINIT(ttyinq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyinq_startup, NULL);
#endif


@@ -0,0 +1,370 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY output queue buffering.
*
* The previous design of the TTY layer offered the so-called clists.
* These clists were used for both the input queues and the output
* queue. We don't use certain features on the output side, like quoting
* bits for parity marking and such. This mechanism is similar to the
* old clists, but only contains the features we need to buffer the
* output.
*/
struct ttyoutq_block
{
struct ttyoutq_block *tob_next;
char tob_data[TTYOUTQ_DATASIZE];
};
static uma_zone_t ttyoutq_zone;
#define TTYOUTQ_INSERT_TAIL(to, tob) \
do \
{ \
if (to->to_end == 0) \
{ \
tob->tob_next = to->to_firstblock; \
to->to_firstblock = tob; \
} \
else \
{ \
tob->tob_next = to->to_lastblock->tob_next; \
to->to_lastblock->tob_next = tob; \
} \
to->to_nblocks++; \
} while (0)
#define TTYOUTQ_REMOVE_HEAD(to) \
do \
{ \
to->to_firstblock = to->to_firstblock->tob_next; \
to->to_nblocks--; \
} while (0)
#define TTYOUTQ_RECYCLE(to, tob) \
do \
{ \
if (to->to_quota <= to->to_nblocks) \
uma_zfree(ttyoutq_zone, tob); \
else \
TTYOUTQ_INSERT_TAIL(to, tob); \
} while (0)
void ttyoutq_flush(struct ttyoutq *to)
{
to->to_begin = 0;
to->to_end = 0;
}
int ttyoutq_setsize(struct ttyoutq *to, struct lwp_tty *tp, size_t size)
{
struct ttyoutq_block *tob;
to->to_quota = howmany(size, TTYOUTQ_DATASIZE);
while (to->to_quota > to->to_nblocks)
{
/*
* List is getting bigger.
* Add new blocks to the tail of the list.
*
* We must unlock the TTY temporarily, because we need
* to allocate memory. This won't be a problem, because
* in the worst case, another thread ends up here, which
* may cause us to allocate too many blocks, but this
* will be caught by the loop below.
*/
tty_unlock(tp);
tob = uma_zalloc(ttyoutq_zone, M_WAITOK);
tty_lock(tp);
if (tty_gone(tp))
{
uma_zfree(ttyoutq_zone, tob);
return -ENXIO;
}
TTYOUTQ_INSERT_TAIL(to, tob);
}
return 0;
}
void ttyoutq_free(struct ttyoutq *to)
{
struct ttyoutq_block *tob;
ttyoutq_flush(to);
to->to_quota = 0;
while ((tob = to->to_firstblock) != NULL)
{
TTYOUTQ_REMOVE_HEAD(to);
uma_zfree(ttyoutq_zone, tob);
}
MPASS(to->to_nblocks == 0);
}
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len)
{
char *cbuf = buf;
while (len > 0)
{
struct ttyoutq_block *tob;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (to->to_begin == to->to_end)
break;
tob = to->to_firstblock;
if (tob == NULL)
break;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
cbegin = to->to_begin;
cend = MIN(MIN(to->to_end, to->to_begin + len), TTYOUTQ_DATASIZE);
clen = cend - cbegin;
/* Copy the data out of the buffers. */
memcpy(cbuf, tob->tob_data + cbegin, clen);
cbuf += clen;
len -= clen;
if (cend == to->to_end)
{
/* Read the complete queue. */
to->to_begin = 0;
to->to_end = 0;
}
else if (cend == TTYOUTQ_DATASIZE)
{
/* Read the block until the end. */
TTYOUTQ_REMOVE_HEAD(to);
to->to_begin = 0;
to->to_end -= TTYOUTQ_DATASIZE;
TTYOUTQ_RECYCLE(to, tob);
}
else
{
/* Read the block partially. */
to->to_begin += clen;
}
}
return cbuf - (char *)buf;
}
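/*
 * Illustration only: a sketch of how a hardware driver's output path could
 * drain the queue with ttyoutq_read().  The queue pointer and the putc
 * callback are assumptions made for the example; a real driver obtains the
 * queue from its tty and runs with the tty lock held.
 */
#if 0
static void ttyoutq_drain_sketch(struct ttyoutq *to, void (*putc_cb)(char))
{
    char buf[64];
    size_t n, i;

    /* Pull as much buffered output as is currently available. */
    while ((n = ttyoutq_read(to, buf, sizeof(buf))) > 0)
    {
        for (i = 0; i < n; i++)
            putc_cb(buf[i]);
    }
}
#endif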
/*
* An optimized version of ttyoutq_read() which can be used in pseudo
* TTY drivers to directly copy data from the outq to userspace, instead
* of buffering it.
*
* We can only copy data directly if we need to read the entire block
* back to the user, because we temporarily remove the block from the
* queue. Otherwise we need to copy it to a temporary buffer first, to
* make sure data remains in the correct order.
*/
int ttyoutq_read_uio(struct ttyoutq *to, struct lwp_tty *tp, struct uio *uio)
{
while (uio->uio_resid > 0)
{
int error;
struct ttyoutq_block *tob;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (to->to_begin == to->to_end)
return 0;
tob = to->to_firstblock;
if (tob == NULL)
return 0;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
cbegin = to->to_begin;
cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
TTYOUTQ_DATASIZE);
clen = cend - cbegin;
/*
* We can prevent buffering in some cases:
* - We need to read the block until the end.
* - We don't need to read the block until the end, but
* there is no data beyond it, which allows us to move
* the write pointer to a new block.
*/
if (cend == TTYOUTQ_DATASIZE || cend == to->to_end)
{
/*
* Fast path: zero copy. Remove the first block,
* so we can unlock the TTY temporarily.
*/
TTYOUTQ_REMOVE_HEAD(to);
to->to_begin = 0;
if (to->to_end <= TTYOUTQ_DATASIZE)
to->to_end = 0;
else
to->to_end -= TTYOUTQ_DATASIZE;
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(tob->tob_data + cbegin, clen, uio);
tty_lock(tp);
/* Block can now be readded to the list. */
TTYOUTQ_RECYCLE(to, tob);
}
else
{
char ob[TTYOUTQ_DATASIZE - 1];
/*
* Slow path: store data in a temporary buffer.
*/
memcpy(ob, tob->tob_data + cbegin, clen);
to->to_begin += clen;
MPASS(to->to_begin < TTYOUTQ_DATASIZE);
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(ob, clen, uio);
tty_lock(tp);
}
if (error != 0)
return error;
}
return 0;
}
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
{
const char *cbuf = buf;
struct ttyoutq_block *tob;
unsigned int boff;
size_t l;
while (nbytes > 0)
{
boff = to->to_end % TTYOUTQ_DATASIZE;
if (to->to_end == 0)
{
/* First time we're being used or drained. */
MPASS(to->to_begin == 0);
tob = to->to_firstblock;
if (tob == NULL)
{
/* Queue has no blocks. */
break;
}
to->to_lastblock = tob;
}
else if (boff == 0)
{
/* We reached the end of this block on last write. */
tob = to->to_lastblock->tob_next;
if (tob == NULL)
{
/* We've reached the watermark. */
break;
}
to->to_lastblock = tob;
}
else
{
tob = to->to_lastblock;
}
/* Don't copy more than was requested. */
l = MIN(nbytes, TTYOUTQ_DATASIZE - boff);
MPASS(l > 0);
memcpy(tob->tob_data + boff, cbuf, l);
cbuf += l;
nbytes -= l;
to->to_end += l;
}
return (cbuf - (const char *)buf);
}
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
{
size_t ret __unused;
if (ttyoutq_bytesleft(to) < nbytes)
return -1;
/* We should always be able to write it back. */
ret = ttyoutq_write(to, buf, nbytes);
MPASS(ret == nbytes);
return 0;
}
static int ttyoutq_startup(void)
{
ttyoutq_zone = uma_zcreate("ttyoutq", sizeof(struct ttyoutq_block), NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
return 0;
}
INIT_PREV_EXPORT(ttyoutq_startup);
#if 0
SYSINIT(ttyoutq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyoutq_startup, NULL);
#endif


@@ -0,0 +1,837 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-07 Shell init ver.
*/
#include <ipc/condvar.h>
#include <rid_bitmap.h>
#include <terminal/terminal.h>
#include <terminal/tty_internal.h>
#include <ptyfs.h>
#include <rtthread.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define PTS_EXTERNAL
/*
* Per-PTS structure.
*
* List of locks
* (t) locked by tty_lock()
* (c) const until freeing
*/
struct pts_softc
{
int pts_unit; /* (c) Device unit number. */
unsigned int pts_flags; /* (t) Device flags. */
#define PTS_PKT 0x1 /* Packet mode. */
#define PTS_FINISHED 0x2 /* Return errors on read()/write(). */
#define PTS_PTLOCKED 0x4 /* ioctl TIOCSPTLCK/TIOCGPTLCK */
char pts_pkt; /* (t) Unread packet mode data. */
struct rt_condvar pts_inwait; /* (t) Blocking write() on master. */
struct rt_wqueue pts_inpoll; /* (t) Select queue for write(). */
struct rt_condvar pts_outwait; /* (t) Blocking read() on master. */
struct rt_wqueue pts_outpoll; /* (t) Select queue for read(). */
struct ucred *pts_cred; /* (c) Resource limit. */
rt_device_t pts_master; /** (c) Master device.
* (Note: the rt-smart kernel supports
* multiple ptmx instances.)
*/
};
/**
* Controller-side file operations.
* (P)seudo-(T)erminal (M)ultiple(X)er
*/
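/*
 * Illustration only: the fileops below implement the master side of a
 * pseudo-terminal pair.  From user space the usual flow looks like the
 * standard POSIX sketch here; the slave path returned by ptsname() (e.g.
 * "/dev/pts/0") depends on how ptyfs is mounted, so treat it as an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int open_pty_pair_sketch(int *master, int *slave)
{
    int m, s;
    char *sname;

    m = posix_openpt(O_RDWR | O_NOCTTY);   /* open a ptmx master */
    if (m < 0)
        return -1;
    if (grantpt(m) < 0 || unlockpt(m) < 0  /* unlockpt ~ clears PTS_PTLOCKED */
        || (sname = ptsname(m)) == NULL)   /* typically uses TIOCGPTN */
    {
        close(m);
        return -1;
    }
    s = open(sname, O_RDWR | O_NOCTTY);    /* open the slave side */
    if (s < 0)
    {
        close(m);
        return -1;
    }
    *master = m;
    *slave = s;
    return 0;
}
#endif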
static int ptsdev_read(struct lwp_tty *tp, struct uio *uio,
struct ucred *active_cred, int oflags,
struct rt_thread *td)
{
struct pts_softc *psc = tty_softc(tp);
int error = 0;
char pkt;
if (uio->uio_resid == 0)
return (0);
tty_lock(tp);
for (;;)
{
/*
* Implement packet mode. When packet mode is turned on,
* the first byte contains a bitmask of events that
* occurred (start, stop, flush, window size, etc).
*/
if (psc->pts_flags & PTS_PKT && psc->pts_pkt)
{
pkt = psc->pts_pkt;
psc->pts_pkt = 0;
tty_unlock(tp);
error = uiomove(&pkt, 1, uio);
return (error);
}
/*
* Transmit regular data.
*
* XXX: We shouldn't use ttydisc_getc_poll()! Even
* though in this implementation, there is likely going
* to be data, we should just call ttydisc_getc_uio()
* and use its return value to sleep.
*/
if (ttydisc_getc_poll(tp))
{
if (psc->pts_flags & PTS_PKT)
{
/*
* XXX: Small race. Fortunately PTY
* consumers aren't multithreaded.
*/
tty_unlock(tp);
pkt = TIOCPKT_DATA;
error = uiomove(&pkt, 1, uio);
if (error)
return (error);
tty_lock(tp);
}
error = ttydisc_getc_uio(tp, uio);
break;
}
/* Maybe the device isn't used anyway. */
if (psc->pts_flags & PTS_FINISHED)
break;
/* Wait for more data. */
if (oflags & O_NONBLOCK)
{
error = -EWOULDBLOCK;
break;
}
error = cv_wait_sig(&psc->pts_outwait, tp->t_mtx);
if (error != 0)
break;
}
tty_unlock(tp);
return (error);
}
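/*
 * Illustration only: what packet mode looks like from the master side of
 * the read path above.  With TIOCPKT enabled, each read() is prefixed by a
 * status byte: TIOCPKT_DATA (0) for ordinary slave output, or a bitmask of
 * the TIOCPKT_* events queued by ptsdrv_pktnotify() below.  Minimal
 * sketch; local names are made up.
 */
#if 0
#include <sys/ioctl.h>
#include <unistd.h>

static void master_packet_read_sketch(int master_fd)
{
    int on = 1;
    char buf[256];
    ssize_t n;

    ioctl(master_fd, TIOCPKT, &on);        /* sets PTS_PKT on this master */
    n = read(master_fd, buf, sizeof(buf));
    if (n > 0)
    {
        if (buf[0] == 0)                   /* TIOCPKT_DATA */
        {
            /* buf[1 .. n-1] is ordinary slave output. */
        }
        else
        {
            /* buf[0] carries events such as TIOCPKT_START/TIOCPKT_STOP. */
        }
    }
}
#endif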
static int ptsdev_write(struct lwp_tty *tp, struct uio *uio,
struct ucred *active_cred, int oflags,
struct rt_thread *td)
{
struct pts_softc *psc = tty_softc(tp);
char ib[256], *ibstart;
size_t iblen, rintlen;
int error = 0;
if (uio->uio_resid == 0)
return (0);
for (;;)
{
ibstart = ib;
iblen = MIN(uio->uio_resid, sizeof ib);
error = uiomove(ib, iblen, uio);
tty_lock(tp);
if (error != 0)
{
iblen = 0;
goto done;
}
/*
* When possible, avoid the slow path. rint_bypass()
* copies all input to the input queue at once.
*/
MPASS(iblen > 0);
do
{
rintlen = ttydisc_rint_simple(tp, ibstart, iblen);
ibstart += rintlen;
iblen -= rintlen;
if (iblen == 0)
{
/* All data written. */
break;
}
/* Maybe the device isn't used anyway. */
if (psc->pts_flags & PTS_FINISHED)
{
error = -EIO;
goto done;
}
/* Wait for more data. */
if (oflags & O_NONBLOCK)
{
error = -EWOULDBLOCK;
goto done;
}
/* Wake up users on the slave side. */
ttydisc_rint_done(tp);
error = cv_wait_sig(&psc->pts_inwait, tp->t_mtx);
if (error != 0)
goto done;
} while (iblen > 0);
if (uio->uio_resid == 0)
break;
tty_unlock(tp);
}
done:
ttydisc_rint_done(tp);
tty_unlock(tp);
/*
* Don't account for the part of the buffer that we couldn't
* pass to the TTY.
*/
uio->uio_resid += iblen;
return (error);
}
static int ptsdev_ioctl(struct lwp_tty *tp, rt_ubase_t cmd, void *data,
struct ucred *active_cred, int fflags,
struct rt_thread *td)
{
struct pts_softc *psc = tty_softc(tp);
int error = 0, sig;
switch (cmd)
{
#ifdef USING_BSD_IOCTL_EXT
case FIODTYPE:
*(int *)data = D_TTY;
return (0);
#endif
case FIONBIO:
/* This device supports non-blocking operation. */
return (0);
case FIONREAD:
tty_lock(tp);
if (psc->pts_flags & PTS_FINISHED)
{
/* Force read() to be called. */
*(int *)data = 1;
}
else
{
*(int *)data = ttydisc_getc_poll(tp);
}
tty_unlock(tp);
return (0);
#ifdef USING_BSD_IOCTL_EXT
case FIODGNAME:
#ifdef COMPAT_FREEBSD32
case FIODGNAME_32:
#endif
{
struct fiodgname_arg *fgn;
const char *p;
int i;
/* Reverse device name lookups, for ptsname() and ttyname(). */
fgn = data;
p = tty_devname(tp);
i = strlen(p) + 1;
if (i > fgn->len)
return -EINVAL;
return (copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i));
}
#endif
/*
* We need to implement TIOCGPGRP and TIOCGSID here again. When
* called on the pseudo-terminal master, it should not check if
* the terminal is the foreground terminal of the calling
* process.
*
* TIOCGETA is also implemented here. Various Linux PTY routines
* often call isatty(), which is implemented by tcgetattr().
*/
case TIOCGETA:
/* Obtain terminal flags through tcgetattr(). */
tty_lock(tp);
*(struct termios *)data = tp->t_termios;
tty_unlock(tp);
return (0);
case TIOCSETAF:
case TIOCSETAW:
/*
* We must make sure we turn tcsetattr() calls of TCSAFLUSH and
* TCSADRAIN into something different. If an application would
* call TCSAFLUSH or TCSADRAIN on the master descriptor, it may
* deadlock waiting for all data to be read.
*/
cmd = TIOCSETA;
break;
case TIOCGPTN:
/*
* Get the device unit number.
*/
if (psc->pts_unit < 0)
return -ENOTTY;
*(unsigned int *)data = psc->pts_unit;
return (0);
case TIOCGPGRP:
/* Get the foreground process group ID. */
tty_lock(tp);
if (tp->t_pgrp != NULL)
*(int *)data = tp->t_pgrp->pgid;
else
*(int *)data = NO_PID;
tty_unlock(tp);
return (0);
case TIOCGSID:
/* Get the session leader process ID. */
tty_lock(tp);
if (tp->t_session == NULL)
error = -ENOTTY;
else
*(int *)data = tp->t_session->sid;
tty_unlock(tp);
return (error);
#ifdef USING_BSD_IOCTL_EXT
case TIOCPTMASTER:
/* Yes, we are a pseudo-terminal master. */
return (0);
#endif /* USING_BSD_IOCTL_EXT */
case TIOCSIG:
/* Signal the foreground process group. */
sig = *(int *)data;
if (sig < 1 || sig >= _LWP_NSIG)
return -EINVAL;
tty_lock(tp);
lwp_tty_signal_pgrp(tp, sig);
tty_unlock(tp);
return (0);
case TIOCPKT:
/* Enable/disable packet mode. */
tty_lock(tp);
if (*(int *)data)
psc->pts_flags |= PTS_PKT;
else
psc->pts_flags &= ~PTS_PKT;
tty_unlock(tp);
return (0);
}
/* Just redirect this ioctl to the slave device. */
tty_lock(tp);
error = tty_ioctl(tp, cmd, data, fflags, td);
tty_unlock(tp);
if (error == -ENOIOCTL)
error = -ENOTTY;
return error;
}
static int ptsdev_poll(struct lwp_tty *tp, struct rt_pollreq *req,
struct ucred *active_cred, struct rt_thread *td)
{
struct pts_softc *psc = tty_softc(tp);
int revents = 0;
int events = req->_key;
tty_lock(tp);
if (psc->pts_flags & PTS_FINISHED)
{
/* Slave device is not opened. */
tty_unlock(tp);
return ((events & (POLLIN | POLLRDNORM)) | POLLHUP);
}
if (events & (POLLIN | POLLRDNORM))
{
/* See if we can getc something. */
if (ttydisc_getc_poll(tp) || (psc->pts_flags & PTS_PKT && psc->pts_pkt))
revents |= events & (POLLIN | POLLRDNORM);
}
if (events & (POLLOUT | POLLWRNORM))
{
/* See if we can rint something. */
if (ttydisc_rint_poll(tp))
revents |= events & (POLLOUT | POLLWRNORM);
}
/*
* No need to check for POLLHUP here. This device cannot be used
* as a callout device, which means we always have a carrier,
* because the master is.
*/
if (revents == 0)
{
/*
* This code might look misleading, but the naming of
* poll events on this side is the opposite of the slave
* device.
*/
if (events & (POLLIN | POLLRDNORM))
rt_poll_add(&psc->pts_outpoll, req);
if (events & (POLLOUT | POLLWRNORM))
rt_poll_add(&psc->pts_inpoll, req);
}
tty_unlock(tp);
return (revents);
}
#if USING_BSD_KQUEUE
/*
* kqueue support.
*/
static void pts_kqops_read_detach(struct knote *kn)
{
struct file *fp = kn->kn_fp;
struct lwp_tty *tp = fp->f_data;
struct pts_softc *psc = tty_softc(tp);
knlist_remove(&psc->pts_outpoll.si_note, kn, 0);
}
static int pts_kqops_read_event(struct knote *kn, long hint)
{
struct file *fp = kn->kn_fp;
struct lwp_tty *tp = fp->f_data;
struct pts_softc *psc = tty_softc(tp);
if (psc->pts_flags & PTS_FINISHED)
{
kn->kn_flags |= EV_EOF;
return (1);
}
else
{
kn->kn_data = ttydisc_getc_poll(tp);
return (kn->kn_data > 0);
}
}
static void pts_kqops_write_detach(struct knote *kn)
{
struct file *fp = kn->kn_fp;
struct lwp_tty *tp = fp->f_data;
struct pts_softc *psc = tty_softc(tp);
knlist_remove(&psc->pts_inpoll.si_note, kn, 0);
}
static int pts_kqops_write_event(struct knote *kn, long hint)
{
struct file *fp = kn->kn_fp;
struct lwp_tty *tp = fp->f_data;
struct pts_softc *psc = tty_softc(tp);
if (psc->pts_flags & PTS_FINISHED)
{
kn->kn_flags |= EV_EOF;
return (1);
}
else
{
kn->kn_data = ttydisc_rint_poll(tp);
return (kn->kn_data > 0);
}
}
static struct filterops pts_kqops_read = {
.f_isfd = 1,
.f_detach = pts_kqops_read_detach,
.f_event = pts_kqops_read_event,
};
static struct filterops pts_kqops_write = {
.f_isfd = 1,
.f_detach = pts_kqops_write_detach,
.f_event = pts_kqops_write_event,
};
static int ptsdev_kqfilter(struct file *fp, struct knote *kn)
{
struct lwp_tty *tp = fp->f_data;
struct pts_softc *psc = tty_softc(tp);
int error = 0;
tty_lock(tp);
switch (kn->kn_filter)
{
case EVFILT_READ:
kn->kn_fop = &pts_kqops_read;
knlist_add(&psc->pts_outpoll.si_note, kn, 1);
break;
case EVFILT_WRITE:
kn->kn_fop = &pts_kqops_write;
knlist_add(&psc->pts_inpoll.si_note, kn, 1);
break;
default:
error = EINVAL;
break;
}
tty_unlock(tp);
return (error);
}
#endif
#if USING_BSD_STAT
static int ptsdev_stat(struct file *fp, struct stat *sb,
struct ucred *active_cred)
{
struct lwp_tty *tp = fp->f_data;
#ifdef PTS_EXTERNAL
struct pts_softc *psc = tty_softc(tp);
#endif /* PTS_EXTERNAL */
struct cdev *dev = tp->t_dev;
/*
* According to POSIX, we must implement an fstat(). This also
* makes this implementation compatible with Linux binaries,
* because Linux calls fstat() on the pseudo-terminal master to
* obtain st_rdev.
*
* XXX: POSIX also mentions we must fill in st_dev, but how?
*/
bzero(sb, sizeof *sb);
#ifdef PTS_EXTERNAL
if (psc->pts_cdev != NULL)
sb->st_ino = sb->st_rdev = dev2udev(psc->pts_cdev);
else
#endif /* PTS_EXTERNAL */
sb->st_ino = sb->st_rdev = tty_udev(tp);
sb->st_atim = dev->si_atime;
sb->st_ctim = dev->si_ctime;
sb->st_mtim = dev->si_mtime;
sb->st_uid = dev->si_uid;
sb->st_gid = dev->si_gid;
sb->st_mode = dev->si_mode | S_IFCHR;
return (0);
}
#endif
static int ptsdev_close(struct lwp_tty *tp, struct rt_thread *td)
{
/* Deallocate TTY device. */
tty_lock(tp);
tty_rel_gone(tp);
#ifdef USING_BSD_VNODE
/* TODO: consider the vnode operation on DFS */
/*
* Open of /dev/ptmx or /dev/ptyXX changes the type of file
* from DTYPE_VNODE to DTYPE_PTS. vn_open() increases vnode
* use count, we need to decrement it, and possibly do other
* required cleanup.
*/
if (fp->f_vnode != NULL)
return (vnops.fo_close(fp, td));
#endif /* USING_BSD_VNODE */
return 0;
}
#ifdef USING_BSD_KINFO
static int ptsdev_fill_kinfo(struct file *fp, struct kinfo_file *kif,
struct filedesc *fdp)
{
struct lwp_tty *tp;
kif->kf_type = KF_TYPE_PTS;
tp = fp->f_data;
kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
kif->kf_un.kf_pts.kf_pts_dev_freebsd11 =
kif->kf_un.kf_pts.kf_pts_dev; /* truncate */
strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
return (0);
}
#endif
struct bsd_fileops bsd_ptsdev_methods = {
.fo_read = ptsdev_read,
.fo_write = ptsdev_write,
// .fo_truncate = invfo_truncate,
.fo_ioctl = ptsdev_ioctl,
.fo_poll = ptsdev_poll,
// .fo_kqfilter = ptsdev_kqfilter,
// .fo_stat = ptsdev_stat,
.fo_close = ptsdev_close,
// .fo_chmod = invfo_chmod,
// .fo_chown = invfo_chown,
// .fo_sendfile = invfo_sendfile,
// .fo_fill_kinfo = ptsdev_fill_kinfo,
.fo_flags = DFLAG_PASSABLE,
};
/*
* Driver-side hooks.
*/
static void ptsdrv_outwakeup(struct lwp_tty *tp)
{
struct pts_softc *psc = tty_softc(tp);
cv_broadcast(&psc->pts_outwait);
rt_wqueue_wakeup_all(&psc->pts_outpoll, (void *)POLLIN);
}
static void ptsdrv_inwakeup(struct lwp_tty *tp)
{
struct pts_softc *psc = tty_softc(tp);
cv_broadcast(&psc->pts_inwait);
rt_wqueue_wakeup_all(&psc->pts_inpoll, (void *)POLLOUT);
}
static int ptsdrv_open(struct lwp_tty *tp)
{
struct pts_softc *psc = tty_softc(tp);
/* for ioctl(TIOCSPTLCK) */
if (psc->pts_flags & PTS_PTLOCKED)
return -EIO;
psc->pts_flags &= ~PTS_FINISHED;
return 0;
}
static void ptsdrv_close(struct lwp_tty *tp)
{
struct pts_softc *psc = tty_softc(tp);
/* Wake up any blocked readers/writers. */
psc->pts_flags |= PTS_FINISHED;
ptsdrv_outwakeup(tp);
ptsdrv_inwakeup(tp);
}
static void ptsdrv_pktnotify(struct lwp_tty *tp, char event)
{
struct pts_softc *psc = tty_softc(tp);
/*
* Clear conflicting flags.
*/
switch (event)
{
case TIOCPKT_STOP:
psc->pts_pkt &= ~TIOCPKT_START;
break;
case TIOCPKT_START:
psc->pts_pkt &= ~TIOCPKT_STOP;
break;
case TIOCPKT_NOSTOP:
psc->pts_pkt &= ~TIOCPKT_DOSTOP;
break;
case TIOCPKT_DOSTOP:
psc->pts_pkt &= ~TIOCPKT_NOSTOP;
break;
}
psc->pts_pkt |= event;
/**
* Note: on rt-smart we only wake up the master when it is willing to
* accept packet events, i.e. when packet mode (PTS_PKT) is enabled.
* poll() installs the wait-queue node for POLLIN only in that case, and
* a wakeup removes the node from the queue, so waking it for an event
* the master will not consume could swallow a later, real wakeup.
*/
if (psc->pts_flags & PTS_PKT)
ptsdrv_outwakeup(tp);
}
static void ptsdrv_free(void *softc)
{
struct pts_softc *psc = softc;
/* Make device number available again. */
if (psc->pts_unit >= 0)
ptyfs_unregister_pts(psc->pts_master, psc->pts_unit);
#ifdef USING_BSD_UCRED
chgptscnt(psc->pts_cred->cr_ruidinfo, -1, 0);
racct_sub_cred(psc->pts_cred, RACCT_NPTS, 1);
crfree(psc->pts_cred);
#endif
rt_wqueue_wakeup_all(&psc->pts_inpoll, (void *)POLLHUP);
rt_wqueue_wakeup_all(&psc->pts_outpoll, (void *)POLLHUP);
rt_free(psc);
}
static struct lwp_ttydevsw pts_class = {
.tsw_flags = TF_NOPREFIX,
.tsw_outwakeup = ptsdrv_outwakeup,
.tsw_inwakeup = ptsdrv_inwakeup,
.tsw_open = ptsdrv_open,
.tsw_close = ptsdrv_close,
.tsw_pktnotify = ptsdrv_pktnotify,
.tsw_free = ptsdrv_free,
};
int pts_alloc(int fflags, struct rt_thread *td, struct dfs_file *ptm_file)
{
int unit;
struct lwp_tty *tp;
struct pts_softc *psc;
char name_buf[DIRENT_NAME_MAX];
const char *rootpath;
rt_device_t ptmx_device = ptm_file->vnode->data;
#ifdef USING_BSD_UCRED
struct rt_lwp *p = td->lwp;
int ok, error;
struct ucred *cred = td->td_ucred;
#endif
/* Resource limiting. */
#ifdef USING_BSD_UCRED
LWP_LOCK(p);
error = racct_add(p, RACCT_NPTS, 1);
if (error != 0)
{
LWP_UNLOCK(p);
return -EAGAIN;
}
ok = chgptscnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPTS));
if (!ok)
{
racct_sub(p, RACCT_NPTS, 1);
LWP_UNLOCK(p);
return -EAGAIN;
}
LWP_UNLOCK(p);
#endif
/* Allocate TTY and softc. */
psc = rt_calloc(1, sizeof(struct pts_softc));
if (!psc)
    return -ENOMEM;
cv_init(&psc->pts_inwait, "ptsin");
cv_init(&psc->pts_outwait, "ptsout");
rt_wqueue_init(&psc->pts_inpoll);
rt_wqueue_init(&psc->pts_outpoll);
psc->pts_master = ptmx_device;
#ifdef USING_BSD_UCRED
psc->pts_cred = crhold(cred);
#else
psc->pts_cred = 0;
#endif
tp = lwp_tty_create(&pts_class, psc);
if (!tp)
{
/* detach the condvars before freeing the softc */
rt_condvar_detach(&psc->pts_inwait);
rt_condvar_detach(&psc->pts_outwait);
rt_free(psc);
return -ENOMEM;
}
/* Try to allocate a new pts unit */
unit = ptyfs_register_pts(ptmx_device, &tp->parent);
if (unit < 0)
{
#ifdef USING_BSD_UCRED
racct_sub(p, RACCT_NPTS, 1);
chgptscnt(cred->cr_ruidinfo, -1, 0);
#endif
lwp_tty_delete(tp);
return -EAGAIN;
}
psc->pts_unit = unit;
/* Expose the slave device as well. */
#ifdef USING_BSD_UCRED
tty_makedev(tp, td->td_ucred, "pts/%u", psc->pts_unit);
#else
rootpath = ptyfs_get_rootpath(ptmx_device);
RT_ASSERT(rootpath[strlen(rootpath) - 1] != '/');
snprintf(name_buf, DIRENT_NAME_MAX, "%s/%d", rootpath, psc->pts_unit);
/* setup the pts */
lwp_tty_register(tp, name_buf);
/* from now on, this file operates on the new pts */
ptm_file->data = tp;
#endif
return 0;
}
void pts_set_lock(lwp_tty_t pts, rt_bool_t is_lock)
{
struct pts_softc *psc = tty_softc(pts);
if (is_lock)
psc->pts_flags |= PTS_PTLOCKED;
else
psc->pts_flags &= ~PTS_PTLOCKED;
}
rt_bool_t pts_is_locked(lwp_tty_t pts)
{
struct pts_softc *psc = tty_softc(pts);
return !!(psc->pts_flags & PTS_PTLOCKED);
}
int pts_get_pktmode(lwp_tty_t pts)
{
struct pts_softc *psc = tty_softc(pts);
return !!(psc->pts_flags & PTS_PKT);
}
