2005-04-16 13:20:01 +08:00
|
|
|
/* fhandler_fifo.cc - See fhandler.h for a description of the fhandler classes.
|
2003-09-20 08:31:13 +08:00
|
|
|
|
|
|
|
This file is part of Cygwin.
|
|
|
|
|
|
|
|
This software is a copyrighted work licensed under the terms of the
|
|
|
|
Cygwin license. Please consult the file "CYGWIN_LICENSE" for
|
|
|
|
details. */
|
|
|
|
|
|
|
|
#include "winsup.h"
|
2019-03-23 03:30:36 +08:00
|
|
|
#include <w32api/winioctl.h>
|
2008-04-08 00:15:45 +08:00
|
|
|
#include "miscfuncs.h"
|
2003-09-20 08:31:13 +08:00
|
|
|
|
|
|
|
#include "cygerrno.h"
|
|
|
|
#include "security.h"
|
|
|
|
#include "path.h"
|
|
|
|
#include "fhandler.h"
|
|
|
|
#include "dtable.h"
|
|
|
|
#include "cygheap.h"
|
2007-07-08 01:00:33 +08:00
|
|
|
#include "sigproc.h"
|
|
|
|
#include "cygtls.h"
|
2009-10-31 21:24:06 +08:00
|
|
|
#include "shared_info.h"
|
2011-04-29 16:27:11 +08:00
|
|
|
#include "ntdll.h"
|
2012-06-18 04:50:24 +08:00
|
|
|
#include "cygwait.h"
|
2020-04-24 06:43:42 +08:00
|
|
|
#include <sys/param.h>
|
2003-09-20 08:31:13 +08:00
|
|
|
|
2020-02-02 05:36:31 +08:00
|
|
|
/*
|
|
|
|
Overview:
|
|
|
|
|
2020-04-14 21:45:44 +08:00
|
|
|
FIFOs are implemented via Windows named pipes. The server end of
|
|
|
|
the pipe corresponds to an fhandler_fifo open for reading (a.k.a,
|
|
|
|
a "reader"), and the client end corresponds to an fhandler_fifo
|
|
|
|
open for writing (a.k.a, a "writer").
|
|
|
|
|
|
|
|
The server can have multiple instances. The reader (assuming for
|
|
|
|
the moment that there is only one) creates a pipe instance for
|
|
|
|
each writer that opens. The reader maintains a list of
|
|
|
|
"fifo_client_handler" structures, one for each writer. A
|
|
|
|
fifo_client_handler contains the handle for the pipe server
|
|
|
|
instance and information about the state of the connection with
|
2020-08-03 21:38:08 +08:00
|
|
|
the writer. Access to the list is controlled by a
|
|
|
|
"fifo_client_lock".
|
2020-04-14 21:45:44 +08:00
|
|
|
|
|
|
|
The reader runs a "fifo_reader_thread" that creates new pipe
|
|
|
|
instances as needed and listens for client connections.
|
|
|
|
|
2020-08-03 21:38:08 +08:00
|
|
|
The connection state of a fifo_client_handler has one of the
|
|
|
|
following values, in which order is important:
|
|
|
|
|
|
|
|
fc_unknown
|
|
|
|
fc_error
|
|
|
|
fc_disconnected
|
|
|
|
fc_closing
|
|
|
|
fc_listening
|
|
|
|
fc_connected
|
|
|
|
fc_input_avail
|
|
|
|
|
|
|
|
It can be changed in the following places:
|
|
|
|
|
|
|
|
- It is set to fc_listening when the pipe instance is created.
|
|
|
|
|
|
|
|
- It is set to fc_connected when the fifo_reader_thread detects
|
|
|
|
a connection.
|
|
|
|
|
|
|
|
- It is set to a value reported by the O/S when
|
|
|
|
query_and_set_state is called. This can happen in
|
|
|
|
select.cc:peek_fifo and a couple other places.
|
|
|
|
|
|
|
|
- It is set to fc_disconnected by raw_read when an attempt to
|
|
|
|
read yields STATUS_PIPE_BROKEN.
|
|
|
|
|
|
|
|
- It is set to fc_error in various places when unexpected
|
|
|
|
things happen.
|
|
|
|
|
|
|
|
State changes are always guarded by fifo_client_lock.
|
|
|
|
|
2020-04-14 21:45:44 +08:00
|
|
|
If there are multiple readers open, only one of them, called the
|
|
|
|
"owner", maintains the fifo_client_handler list. The owner is
|
|
|
|
therefore the only reader that can read at any given time. If a
|
|
|
|
different reader wants to read, it has to take ownership and
|
|
|
|
duplicate the fifo_client_handler list.
|
|
|
|
|
|
|
|
A reader that is not an owner also runs a fifo_reader_thread,
|
|
|
|
which is mostly idle. The thread wakes up if that reader might
|
|
|
|
need to take ownership.
|
|
|
|
|
2020-07-12 03:43:44 +08:00
|
|
|
There is a block of named shared memory, accessible to all
|
|
|
|
fhandlers for a given FIFO. It keeps track of the number of open
|
|
|
|
readers and writers; it contains information needed for the owner
|
|
|
|
change process; and it contains some locks to prevent races and
|
|
|
|
deadlocks between the various threads.
|
|
|
|
|
|
|
|
The shared memory is created by the first reader to open the
|
|
|
|
FIFO. It is opened by subsequent readers and by all writers. It
|
|
|
|
is destroyed by Windows when the last handle to it is closed.
|
|
|
|
|
|
|
|
If a handle to it somehow remains open after all processes
|
|
|
|
holding file descriptors to the FIFO have closed, the shared
|
|
|
|
memory can persist and be reused with stale data by the next
|
|
|
|
process that opens the FIFO. So far I've seen this happen only
|
|
|
|
as a result of a bug in the code, but there are some debug_printf
|
|
|
|
statements in fhandler_fifo::open to help detect this if it
|
|
|
|
happens again.
|
2020-04-14 21:45:44 +08:00
|
|
|
|
|
|
|
At this writing, I know of only one application (Midnight
|
|
|
|
Commander when running under tcsh) that *explicitly* opens two
|
|
|
|
readers of a FIFO. But many applications will have multiple
|
|
|
|
readers open via dup/fork/exec.
|
2020-02-02 05:36:31 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
/* This is only to be used for writers.  When reading,
   STATUS_PIPE_EMPTY simply means there's no data to be read. */
/* GCC statement-expression so that the status argument is evaluated
   exactly once even though it is tested against several values. */
#define STATUS_PIPE_IS_CLOSED(status)	\
  ({ NTSTATUS _s = (status); \
    _s == STATUS_PIPE_CLOSING \
      || _s == STATUS_PIPE_BROKEN \
      || _s == STATUS_PIPE_EMPTY; })

/* True if a writer failed to connect only because no pipe instance
   was free at that moment (a retryable condition). */
#define STATUS_PIPE_NO_INSTANCE_AVAILABLE(status)	\
  ({ NTSTATUS _s = (status); \
    _s == STATUS_INSTANCE_NOT_AVAILABLE \
      || _s == STATUS_PIPE_NOT_AVAILABLE \
      || _s == STATUS_PIPE_BUSY; })

/* Number of pages reserved for shared_fc_handler. */
#define SH_FC_HANDLER_PAGES 100

/* Sentinel meaning "no reader": used for owner/pending-owner slots in
   the shared memory block. */
static NO_COPY fifo_reader_id_t null_fr_id = { .winpid = 0, .fh = NULL };
|
|
|
|
|
2007-07-08 01:00:33 +08:00
|
|
|
fhandler_fifo::fhandler_fifo ():
|
2020-04-26 21:38:46 +08:00
|
|
|
fhandler_base (),
|
|
|
|
read_ready (NULL), write_ready (NULL), writer_opening (NULL),
|
2020-04-25 21:54:18 +08:00
|
|
|
owner_needed_evt (NULL), owner_found_evt (NULL), update_needed_evt (NULL),
|
2020-11-09 01:02:39 +08:00
|
|
|
cancel_evt (NULL), thr_sync_evt (NULL), pipe_name_buf (NULL),
|
2020-04-03 01:47:18 +08:00
|
|
|
fc_handler (NULL), shandlers (0), nhandlers (0),
|
2020-04-26 21:38:46 +08:00
|
|
|
reader (false), writer (false), duplexer (false),
|
2020-03-18 00:29:56 +08:00
|
|
|
max_atomic_write (DEFAULT_PIPEBUFSIZE),
|
2020-04-24 06:43:42 +08:00
|
|
|
me (null_fr_id), shmem_handle (NULL), shmem (NULL),
|
|
|
|
shared_fc_hdl (NULL), shared_fc_handler (NULL)
|
2003-09-20 08:31:13 +08:00
|
|
|
{
|
2007-07-08 01:00:33 +08:00
|
|
|
need_fork_fixup (true);
|
2003-09-20 08:31:13 +08:00
|
|
|
}
|
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
PUNICODE_STRING
|
|
|
|
fhandler_fifo::get_pipe_name ()
|
2009-07-25 04:54:33 +08:00
|
|
|
{
|
2020-11-09 01:02:39 +08:00
|
|
|
if (!pipe_name_buf)
|
2019-03-23 03:30:36 +08:00
|
|
|
{
|
2020-11-09 01:02:39 +08:00
|
|
|
pipe_name.Length = CYGWIN_FIFO_PIPE_NAME_LEN * sizeof (WCHAR);
|
|
|
|
pipe_name.MaximumLength = pipe_name.Length + sizeof (WCHAR);
|
|
|
|
pipe_name_buf = (PWCHAR) cmalloc_abort (HEAP_STR,
|
|
|
|
pipe_name.MaximumLength);
|
|
|
|
pipe_name.Buffer = pipe_name_buf;
|
2019-03-23 03:30:36 +08:00
|
|
|
__small_swprintf (pipe_name_buf, L"%S-fifo.%08x.%016X",
|
|
|
|
&cygheap->installation_key, get_dev (), get_ino ());
|
|
|
|
}
|
|
|
|
return &pipe_name;
|
2009-07-25 04:54:33 +08:00
|
|
|
}
|
|
|
|
|
2010-01-15 02:46:02 +08:00
|
|
|
inline PSECURITY_ATTRIBUTES
|
|
|
|
sec_user_cloexec (bool cloexec, PSECURITY_ATTRIBUTES sa, PSID sid)
|
|
|
|
{
|
|
|
|
return cloexec ? sec_user_nih (sa, sid) : sec_user (sa, sid);
|
|
|
|
}
|
2005-04-22 21:58:09 +08:00
|
|
|
|
2019-03-23 03:30:37 +08:00
|
|
|
static HANDLE
|
2020-05-19 22:14:10 +08:00
|
|
|
create_event ()
|
2019-03-23 03:30:37 +08:00
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
HANDLE evt = NULL;
|
|
|
|
|
2020-05-19 22:14:10 +08:00
|
|
|
InitializeObjectAttributes (&attr, NULL, 0, NULL, NULL);
|
2019-03-23 03:30:37 +08:00
|
|
|
status = NtCreateEvent (&evt, EVENT_ALL_ACCESS, &attr,
|
|
|
|
NotificationEvent, FALSE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return evt;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
set_pipe_non_blocking (HANDLE ph, bool nonblocking)
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
FILE_PIPE_INFORMATION fpi;
|
|
|
|
|
|
|
|
fpi.ReadMode = FILE_PIPE_MESSAGE_MODE;
|
|
|
|
fpi.CompletionMode = nonblocking ? FILE_PIPE_COMPLETE_OPERATION
|
|
|
|
: FILE_PIPE_QUEUE_OPERATION;
|
|
|
|
status = NtSetInformationFile (ph, &io, &fpi, sizeof fpi,
|
|
|
|
FilePipeInformation);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
debug_printf ("NtSetInformationFile(FilePipeInformation): %y", status);
|
|
|
|
}
|
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
NTSTATUS
|
|
|
|
fhandler_fifo::npfs_handle (HANDLE &nph)
|
|
|
|
{
|
|
|
|
static NO_COPY SRWLOCK npfs_lock;
|
|
|
|
static NO_COPY HANDLE npfs_dirh;
|
|
|
|
|
|
|
|
NTSTATUS status = STATUS_SUCCESS;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
|
|
|
|
/* Lockless after first call. */
|
|
|
|
if (npfs_dirh)
|
|
|
|
{
|
|
|
|
nph = npfs_dirh;
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
AcquireSRWLockExclusive (&npfs_lock);
|
|
|
|
if (!npfs_dirh)
|
|
|
|
{
|
|
|
|
InitializeObjectAttributes (&attr, &ro_u_npfs, 0, NULL, NULL);
|
|
|
|
status = NtOpenFile (&npfs_dirh, FILE_READ_ATTRIBUTES | SYNCHRONIZE,
|
|
|
|
&attr, &io, FILE_SHARE_READ | FILE_SHARE_WRITE,
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
ReleaseSRWLockExclusive (&npfs_lock);
|
|
|
|
if (NT_SUCCESS (status))
|
|
|
|
nph = npfs_dirh;
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2019-03-23 03:30:37 +08:00
|
|
|
/* Called when a FIFO is first opened for reading and again each time
|
2019-04-15 03:15:56 +08:00
|
|
|
a new client handler is needed. Each pipe instance is created in
|
|
|
|
blocking mode so that we can easily wait for a connection. After
|
|
|
|
it is connected, it is put in nonblocking mode. */
|
2019-03-23 03:30:36 +08:00
|
|
|
HANDLE
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
fhandler_fifo::create_pipe_instance ()
|
2019-03-23 03:30:36 +08:00
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
HANDLE npfsh;
|
|
|
|
HANDLE ph = NULL;
|
|
|
|
ACCESS_MASK access;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
ULONG hattr;
|
|
|
|
ULONG sharing;
|
|
|
|
ULONG nonblocking = FILE_PIPE_QUEUE_OPERATION;
|
2019-03-23 03:30:37 +08:00
|
|
|
ULONG max_instances = -1;
|
2019-03-23 03:30:36 +08:00
|
|
|
LARGE_INTEGER timeout;
|
|
|
|
|
|
|
|
status = npfs_handle (npfsh);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
access = GENERIC_READ | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES
|
|
|
|
| SYNCHRONIZE;
|
|
|
|
sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
hattr = (openflags & O_CLOEXEC ? 0 : OBJ_INHERIT) | OBJ_CASE_INSENSITIVE;
|
2019-03-23 03:30:36 +08:00
|
|
|
InitializeObjectAttributes (&attr, get_pipe_name (),
|
|
|
|
hattr, npfsh, NULL);
|
|
|
|
timeout.QuadPart = -500000;
|
|
|
|
status = NtCreateNamedPipeFile (&ph, access, &attr, &io, sharing,
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
FILE_OPEN_IF, 0,
|
2019-12-22 06:53:52 +08:00
|
|
|
FILE_PIPE_MESSAGE_TYPE
|
|
|
|
| FILE_PIPE_REJECT_REMOTE_CLIENTS,
|
2019-03-23 03:30:36 +08:00
|
|
|
FILE_PIPE_MESSAGE_MODE,
|
|
|
|
nonblocking, max_instances,
|
|
|
|
DEFAULT_PIPEBUFSIZE, DEFAULT_PIPEBUFSIZE,
|
|
|
|
&timeout);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return ph;
|
|
|
|
}
|
|
|
|
|
2019-05-09 23:36:26 +08:00
|
|
|
/* Connect to a pipe instance. */
|
2019-03-23 03:30:36 +08:00
|
|
|
NTSTATUS
|
2019-05-09 23:36:26 +08:00
|
|
|
fhandler_fifo::open_pipe (HANDLE& ph)
|
2019-03-23 03:30:36 +08:00
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
HANDLE npfsh;
|
|
|
|
ACCESS_MASK access;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
ULONG sharing;
|
|
|
|
|
|
|
|
status = npfs_handle (npfsh);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
return status;
|
|
|
|
access = GENERIC_WRITE | SYNCHRONIZE;
|
2019-05-29 03:50:05 +08:00
|
|
|
InitializeObjectAttributes (&attr, get_pipe_name (),
|
|
|
|
openflags & O_CLOEXEC ? 0 : OBJ_INHERIT,
|
2019-03-23 03:30:36 +08:00
|
|
|
npfsh, NULL);
|
|
|
|
sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
|
2020-03-18 02:14:47 +08:00
|
|
|
return NtOpenFile (&ph, access, &attr, &io, sharing, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait up to 100ms for a pipe instance to be available, then connect. */
|
|
|
|
NTSTATUS
|
|
|
|
fhandler_fifo::wait_open_pipe (HANDLE& ph)
|
|
|
|
{
|
|
|
|
HANDLE npfsh;
|
|
|
|
HANDLE evt;
|
|
|
|
NTSTATUS status;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
ULONG pwbuf_size;
|
|
|
|
PFILE_PIPE_WAIT_FOR_BUFFER pwbuf;
|
|
|
|
LONGLONG stamp;
|
|
|
|
LONGLONG orig_timeout = -100 * NS100PERSEC / MSPERSEC; /* 100ms */
|
|
|
|
|
|
|
|
status = npfs_handle (npfsh);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
return status;
|
|
|
|
if (!(evt = create_event ()))
|
|
|
|
api_fatal ("Can't create event, %E");
|
|
|
|
pwbuf_size
|
|
|
|
= offsetof (FILE_PIPE_WAIT_FOR_BUFFER, Name) + get_pipe_name ()->Length;
|
|
|
|
pwbuf = (PFILE_PIPE_WAIT_FOR_BUFFER) alloca (pwbuf_size);
|
|
|
|
pwbuf->Timeout.QuadPart = orig_timeout;
|
|
|
|
pwbuf->NameLength = get_pipe_name ()->Length;
|
|
|
|
pwbuf->TimeoutSpecified = TRUE;
|
|
|
|
memcpy (pwbuf->Name, get_pipe_name ()->Buffer, get_pipe_name ()->Length);
|
|
|
|
stamp = get_clock (CLOCK_MONOTONIC)->n100secs ();
|
|
|
|
bool retry;
|
|
|
|
do
|
|
|
|
{
|
|
|
|
retry = false;
|
|
|
|
status = NtFsControlFile (npfsh, evt, NULL, NULL, &io, FSCTL_PIPE_WAIT,
|
|
|
|
pwbuf, pwbuf_size, NULL, 0);
|
|
|
|
if (status == STATUS_PENDING)
|
|
|
|
{
|
|
|
|
if (WaitForSingleObject (evt, INFINITE) == WAIT_OBJECT_0)
|
|
|
|
status = io.Status;
|
|
|
|
else
|
|
|
|
api_fatal ("WFSO failed, %E");
|
|
|
|
}
|
|
|
|
if (NT_SUCCESS (status))
|
|
|
|
status = open_pipe (ph);
|
|
|
|
if (STATUS_PIPE_NO_INSTANCE_AVAILABLE (status))
|
|
|
|
{
|
|
|
|
/* Another writer has grabbed the pipe instance. Adjust
|
|
|
|
the timeout and keep waiting if there's time left. */
|
|
|
|
pwbuf->Timeout.QuadPart = orig_timeout
|
|
|
|
+ get_clock (CLOCK_MONOTONIC)->n100secs () - stamp;
|
|
|
|
if (pwbuf->Timeout.QuadPart < 0)
|
|
|
|
retry = true;
|
|
|
|
else
|
|
|
|
status = STATUS_IO_TIMEOUT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
while (retry);
|
|
|
|
NtClose (evt);
|
2019-03-23 03:30:36 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2020-08-01 01:55:17 +08:00
|
|
|
/* Always called with fifo_client_lock in place. */
|
2019-03-23 03:30:37 +08:00
|
|
|
int
|
2020-04-24 06:43:42 +08:00
|
|
|
fhandler_fifo::add_client_handler (bool new_pipe_instance)
|
2019-03-23 03:30:37 +08:00
|
|
|
{
|
|
|
|
fifo_client_handler fc;
|
|
|
|
|
2020-04-03 01:47:18 +08:00
|
|
|
if (nhandlers >= shandlers)
|
2019-03-23 03:30:37 +08:00
|
|
|
{
|
2020-04-03 01:47:18 +08:00
|
|
|
void *temp = realloc (fc_handler,
|
|
|
|
(shandlers += 64) * sizeof (fc_handler[0]));
|
|
|
|
if (!temp)
|
|
|
|
{
|
|
|
|
shandlers -= 64;
|
|
|
|
set_errno (ENOMEM);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
fc_handler = (fifo_client_handler *) temp;
|
2019-03-23 03:30:37 +08:00
|
|
|
}
|
2020-04-24 06:43:42 +08:00
|
|
|
if (new_pipe_instance)
|
|
|
|
{
|
|
|
|
HANDLE ph = create_pipe_instance ();
|
|
|
|
if (!ph)
|
|
|
|
return -1;
|
|
|
|
fc.h = ph;
|
2020-08-03 21:32:30 +08:00
|
|
|
fc.set_state (fc_listening);
|
2020-04-24 06:43:42 +08:00
|
|
|
}
|
2020-04-03 01:47:18 +08:00
|
|
|
fc_handler[nhandlers++] = fc;
|
|
|
|
return 0;
|
2019-04-15 03:16:04 +08:00
|
|
|
}
|
2019-03-23 03:30:37 +08:00
|
|
|
|
2020-08-27 06:21:20 +08:00
|
|
|
/* Always called with fifo_client_lock in place. Delete a
|
|
|
|
client_handler by swapping it with the last one in the list. */
|
2020-03-17 06:04:28 +08:00
|
|
|
void
|
2019-04-15 03:16:04 +08:00
|
|
|
fhandler_fifo::delete_client_handler (int i)
|
|
|
|
{
|
2020-03-17 06:04:28 +08:00
|
|
|
fc_handler[i].close ();
|
2019-04-15 03:16:04 +08:00
|
|
|
if (i < --nhandlers)
|
2020-08-27 06:21:20 +08:00
|
|
|
fc_handler[i] = fc_handler[nhandlers];
|
2019-03-23 03:30:37 +08:00
|
|
|
}
|
|
|
|
|
2020-08-01 01:55:17 +08:00
|
|
|
/* Delete handlers that we will never read from. Always called with
|
|
|
|
fifo_client_lock in place. */
|
2020-04-24 06:43:42 +08:00
|
|
|
void
|
|
|
|
fhandler_fifo::cleanup_handlers ()
|
|
|
|
{
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
while (i < nhandlers)
|
|
|
|
{
|
2020-08-03 21:35:00 +08:00
|
|
|
if (fc_handler[i].get_state () < fc_connected)
|
2020-04-24 06:43:42 +08:00
|
|
|
delete_client_handler (i);
|
|
|
|
else
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-01 01:55:17 +08:00
|
|
|
/* Always called with fifo_client_lock in place. */
|
2019-05-09 22:58:29 +08:00
|
|
|
void
|
2020-08-03 21:38:08 +08:00
|
|
|
fhandler_fifo::record_connection (fifo_client_handler& fc, bool set,
|
2020-05-07 06:39:26 +08:00
|
|
|
fifo_client_connect_state s)
|
2019-05-09 22:58:29 +08:00
|
|
|
{
|
2020-08-03 21:38:08 +08:00
|
|
|
if (set)
|
|
|
|
fc.set_state (s);
|
2020-03-17 06:04:28 +08:00
|
|
|
set_pipe_non_blocking (fc.h, true);
|
2019-05-09 22:58:29 +08:00
|
|
|
}
|
|
|
|
|
2020-05-19 22:14:10 +08:00
|
|
|
/* Called from fifo_reader_thread_func with owner_lock in place. */
|
2020-04-24 06:43:42 +08:00
|
|
|
int
|
2020-05-19 22:14:10 +08:00
|
|
|
fhandler_fifo::update_my_handlers ()
|
2020-04-24 06:43:42 +08:00
|
|
|
{
|
2020-07-15 21:46:42 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
2020-05-19 22:14:10 +08:00
|
|
|
close_all_handlers ();
|
|
|
|
fifo_reader_id_t prev = get_prev_owner ();
|
|
|
|
if (!prev)
|
2020-04-24 06:43:42 +08:00
|
|
|
{
|
2020-05-19 22:14:10 +08:00
|
|
|
debug_printf ("No previous owner to copy handles from");
|
|
|
|
return 0;
|
2020-04-24 06:43:42 +08:00
|
|
|
}
|
2020-05-19 22:14:10 +08:00
|
|
|
HANDLE prev_proc;
|
|
|
|
if (prev.winpid == me.winpid)
|
|
|
|
prev_proc = GetCurrentProcess ();
|
2020-04-24 06:43:42 +08:00
|
|
|
else
|
2020-05-19 22:14:10 +08:00
|
|
|
prev_proc = OpenProcess (PROCESS_DUP_HANDLE, false, prev.winpid);
|
|
|
|
if (!prev_proc)
|
2020-07-12 03:20:45 +08:00
|
|
|
api_fatal ("Can't open process of previous owner, %E");
|
2020-05-19 22:14:10 +08:00
|
|
|
|
2020-08-01 01:55:17 +08:00
|
|
|
fifo_client_lock ();
|
2020-05-19 22:14:10 +08:00
|
|
|
for (int i = 0; i < get_shared_nhandlers (); i++)
|
|
|
|
{
|
|
|
|
if (add_client_handler (false) < 0)
|
|
|
|
api_fatal ("Can't add client handler, %E");
|
|
|
|
fifo_client_handler &fc = fc_handler[nhandlers - 1];
|
|
|
|
if (!DuplicateHandle (prev_proc, shared_fc_handler[i].h,
|
|
|
|
GetCurrentProcess (), &fc.h, 0,
|
|
|
|
!close_on_exec (), DUPLICATE_SAME_ACCESS))
|
2020-04-24 06:43:42 +08:00
|
|
|
{
|
2020-05-19 22:14:10 +08:00
|
|
|
debug_printf ("Can't duplicate handle of previous owner, %E");
|
2020-04-24 06:43:42 +08:00
|
|
|
__seterrno ();
|
2020-08-03 21:32:30 +08:00
|
|
|
fc.set_state (fc_error);
|
2020-07-12 03:20:45 +08:00
|
|
|
fc.last_read = false;
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2020-08-03 21:32:30 +08:00
|
|
|
fc.set_state (shared_fc_handler[i].get_state ());
|
2020-07-12 03:20:45 +08:00
|
|
|
fc.last_read = shared_fc_handler[i].last_read;
|
2020-04-24 06:43:42 +08:00
|
|
|
}
|
|
|
|
}
|
2020-08-01 01:55:17 +08:00
|
|
|
fifo_client_unlock ();
|
2020-11-06 21:16:45 +08:00
|
|
|
NtClose (prev_proc);
|
2020-07-15 21:46:42 +08:00
|
|
|
set_prev_owner (null_fr_id);
|
|
|
|
return ret;
|
2020-04-24 06:43:42 +08:00
|
|
|
}
|
|
|
|
|
2020-08-01 01:55:17 +08:00
|
|
|
/* Always called with fifo_client_lock and owner_lock in place. */
|
2020-04-24 06:43:42 +08:00
|
|
|
int
|
|
|
|
fhandler_fifo::update_shared_handlers ()
|
|
|
|
{
|
|
|
|
cleanup_handlers ();
|
|
|
|
if (nhandlers > get_shared_shandlers ())
|
|
|
|
{
|
|
|
|
if (remap_shared_fc_handler (nhandlers * sizeof (fc_handler[0])) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
set_shared_nhandlers (nhandlers);
|
|
|
|
memcpy (shared_fc_handler, fc_handler, nhandlers * sizeof (fc_handler[0]));
|
2020-04-25 21:54:18 +08:00
|
|
|
shared_fc_handler_updated (true);
|
|
|
|
set_prev_owner (me);
|
2020-04-24 06:43:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-27 02:29:50 +08:00
|
|
|
static DWORD WINAPI
|
|
|
|
fifo_reader_thread (LPVOID param)
|
|
|
|
{
|
|
|
|
fhandler_fifo *fh = (fhandler_fifo *) param;
|
|
|
|
return fh->fifo_reader_thread_func ();
|
|
|
|
}
|
|
|
|
|
2019-03-23 03:30:37 +08:00
|
|
|
DWORD
|
2020-03-27 02:29:50 +08:00
|
|
|
fhandler_fifo::fifo_reader_thread_func ()
|
2019-03-23 03:30:37 +08:00
|
|
|
{
|
2020-04-30 06:53:05 +08:00
|
|
|
HANDLE conn_evt;
|
2019-06-21 03:14:47 +08:00
|
|
|
|
2020-04-30 06:53:05 +08:00
|
|
|
if (!(conn_evt = CreateEvent (NULL, false, false, NULL)))
|
|
|
|
api_fatal ("Can't create connection event, %E");
|
2019-04-15 03:16:01 +08:00
|
|
|
|
2019-03-23 03:30:37 +08:00
|
|
|
while (1)
|
|
|
|
{
|
2020-04-25 21:54:18 +08:00
|
|
|
fifo_reader_id_t cur_owner, pending_owner;
|
|
|
|
bool idle = false, take_ownership = false;
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
|
|
|
|
owner_lock ();
|
|
|
|
cur_owner = get_owner ();
|
2020-04-25 21:54:18 +08:00
|
|
|
pending_owner = get_pending_owner ();
|
|
|
|
|
|
|
|
if (pending_owner)
|
2019-04-15 03:16:04 +08:00
|
|
|
{
|
2020-08-03 04:38:24 +08:00
|
|
|
if (pending_owner == me)
|
|
|
|
take_ownership = true;
|
|
|
|
else if (cur_owner != me)
|
2020-04-25 21:54:18 +08:00
|
|
|
idle = true;
|
|
|
|
else
|
2020-08-03 04:38:24 +08:00
|
|
|
{
|
|
|
|
/* I'm the owner but someone else wants to be. Have I
|
|
|
|
already seen and reacted to update_needed_evt? */
|
|
|
|
if (WaitForSingleObject (update_needed_evt, 0) == WAIT_OBJECT_0)
|
|
|
|
{
|
|
|
|
/* No, I haven't. */
|
|
|
|
fifo_client_lock ();
|
|
|
|
if (update_shared_handlers () < 0)
|
|
|
|
api_fatal ("Can't update shared handlers, %E");
|
|
|
|
fifo_client_unlock ();
|
|
|
|
}
|
|
|
|
owner_unlock ();
|
|
|
|
/* Yield to pending owner. */
|
|
|
|
Sleep (1);
|
|
|
|
continue;
|
|
|
|
}
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
}
|
2020-04-25 21:54:18 +08:00
|
|
|
else if (!cur_owner)
|
|
|
|
take_ownership = true;
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
else if (cur_owner != me)
|
2020-04-25 21:54:18 +08:00
|
|
|
idle = true;
|
2020-07-12 02:52:55 +08:00
|
|
|
else
|
2020-08-03 04:38:24 +08:00
|
|
|
/* I'm the owner and there's no pending owner. */
|
2020-07-12 02:52:55 +08:00
|
|
|
goto owner_listen;
|
|
|
|
if (idle)
|
|
|
|
{
|
|
|
|
owner_unlock ();
|
|
|
|
HANDLE w[2] = { owner_needed_evt, cancel_evt };
|
|
|
|
switch (WaitForMultipleObjects (2, w, false, INFINITE))
|
|
|
|
{
|
|
|
|
case WAIT_OBJECT_0:
|
|
|
|
continue;
|
|
|
|
case WAIT_OBJECT_0 + 1:
|
|
|
|
goto canceled;
|
|
|
|
default:
|
|
|
|
api_fatal ("WFMO failed, %E");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (take_ownership)
|
2020-04-25 21:54:18 +08:00
|
|
|
{
|
|
|
|
if (!shared_fc_handler_updated ())
|
|
|
|
{
|
|
|
|
owner_unlock ();
|
2020-07-12 02:52:55 +08:00
|
|
|
if (IsEventSignalled (cancel_evt))
|
|
|
|
goto canceled;
|
2020-04-25 21:54:18 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
set_owner (me);
|
|
|
|
set_pending_owner (null_fr_id);
|
|
|
|
if (update_my_handlers () < 0)
|
2020-07-12 03:20:45 +08:00
|
|
|
debug_printf ("error updating my handlers, %E");
|
2020-04-25 21:54:18 +08:00
|
|
|
owner_found ();
|
2020-07-12 02:52:55 +08:00
|
|
|
/* Fall through to owner_listen. */
|
2020-04-24 21:05:12 +08:00
|
|
|
}
|
2019-04-15 03:16:04 +08:00
|
|
|
}
|
2020-07-12 02:52:55 +08:00
|
|
|
|
|
|
|
owner_listen:
|
2020-07-12 02:55:39 +08:00
|
|
|
fifo_client_lock ();
|
|
|
|
cleanup_handlers ();
|
|
|
|
if (add_client_handler () < 0)
|
|
|
|
api_fatal ("Can't add a client handler, %E");
|
|
|
|
|
|
|
|
/* Listen for a writer to connect to the new client handler. */
|
|
|
|
fifo_client_handler& fc = fc_handler[nhandlers - 1];
|
|
|
|
fifo_client_unlock ();
|
|
|
|
shared_fc_handler_updated (false);
|
|
|
|
owner_unlock ();
|
|
|
|
NTSTATUS status;
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
bool cancel = false;
|
|
|
|
bool update = false;
|
|
|
|
|
|
|
|
status = NtFsControlFile (fc.h, conn_evt, NULL, NULL, &io,
|
|
|
|
FSCTL_PIPE_LISTEN, NULL, 0, NULL, 0);
|
|
|
|
if (status == STATUS_PENDING)
|
|
|
|
{
|
|
|
|
HANDLE w[3] = { conn_evt, update_needed_evt, cancel_evt };
|
|
|
|
switch (WaitForMultipleObjects (3, w, false, INFINITE))
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
{
|
2020-07-12 02:55:39 +08:00
|
|
|
case WAIT_OBJECT_0:
|
|
|
|
status = io.Status;
|
|
|
|
debug_printf ("NtFsControlFile STATUS_PENDING, then %y",
|
|
|
|
status);
|
2019-04-15 03:16:04 +08:00
|
|
|
break;
|
2020-07-12 02:55:39 +08:00
|
|
|
case WAIT_OBJECT_0 + 1:
|
|
|
|
status = STATUS_WAIT_1;
|
|
|
|
update = true;
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
break;
|
2020-07-12 02:55:39 +08:00
|
|
|
case WAIT_OBJECT_0 + 2:
|
|
|
|
status = STATUS_THREAD_IS_TERMINATING;
|
|
|
|
cancel = true;
|
|
|
|
update = true;
|
2019-04-15 03:16:04 +08:00
|
|
|
break;
|
|
|
|
default:
|
2020-07-12 02:55:39 +08:00
|
|
|
api_fatal ("WFMO failed, %E");
|
2019-04-15 03:16:04 +08:00
|
|
|
}
|
2020-07-12 02:55:39 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
debug_printf ("NtFsControlFile status %y, no STATUS_PENDING",
|
|
|
|
status);
|
|
|
|
HANDLE ph = NULL;
|
|
|
|
NTSTATUS status1;
|
|
|
|
|
|
|
|
fifo_client_lock ();
|
2020-08-03 21:38:08 +08:00
|
|
|
if (fc.get_state () != fc_listening)
|
|
|
|
/* select.cc:peek_fifo has already recorded a connection. */
|
|
|
|
;
|
|
|
|
else
|
2020-07-12 02:55:39 +08:00
|
|
|
{
|
2020-08-03 21:43:36 +08:00
|
|
|
switch (status)
|
|
|
|
{
|
|
|
|
case STATUS_SUCCESS:
|
|
|
|
case STATUS_PIPE_CONNECTED:
|
|
|
|
record_connection (fc);
|
|
|
|
break;
|
|
|
|
case STATUS_PIPE_CLOSING:
|
|
|
|
debug_printf ("NtFsControlFile got STATUS_PIPE_CLOSING...");
|
|
|
|
/* Maybe a writer already connected, wrote, and closed.
|
|
|
|
Just query the O/S. */
|
|
|
|
fc.query_and_set_state ();
|
|
|
|
debug_printf ("...O/S reports state %d", fc.get_state ());
|
|
|
|
record_connection (fc, false);
|
|
|
|
break;
|
|
|
|
case STATUS_THREAD_IS_TERMINATING:
|
|
|
|
case STATUS_WAIT_1:
|
|
|
|
/* Try to connect a bogus client. Otherwise fc is still
|
|
|
|
listening, and the next connection might not get recorded. */
|
|
|
|
status1 = open_pipe (ph);
|
|
|
|
WaitForSingleObject (conn_evt, INFINITE);
|
|
|
|
if (NT_SUCCESS (status1))
|
|
|
|
              /* Bogus client connected. */
|
|
|
|
delete_client_handler (nhandlers - 1);
|
|
|
|
else
|
|
|
|
/* Did a real client connect? */
|
|
|
|
switch (io.Status)
|
|
|
|
{
|
|
|
|
case STATUS_SUCCESS:
|
|
|
|
case STATUS_PIPE_CONNECTED:
|
|
|
|
record_connection (fc);
|
|
|
|
break;
|
|
|
|
case STATUS_PIPE_CLOSING:
|
|
|
|
debug_printf ("got STATUS_PIPE_CLOSING when trying to connect bogus client...");
|
|
|
|
fc.query_and_set_state ();
|
|
|
|
debug_printf ("...O/S reports state %d", fc.get_state ());
|
|
|
|
record_connection (fc, false);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
debug_printf ("NtFsControlFile status %y after failing to connect bogus client or real client", io.Status);
|
|
|
|
fc.set_state (fc_error);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
debug_printf ("NtFsControlFile got unexpected status %y", status);
|
|
|
|
fc.set_state (fc_error);
|
|
|
|
break;
|
|
|
|
}
|
2020-07-12 02:55:39 +08:00
|
|
|
}
|
|
|
|
if (ph)
|
|
|
|
NtClose (ph);
|
2020-08-01 01:55:17 +08:00
|
|
|
if (update)
|
|
|
|
{
|
|
|
|
owner_lock ();
|
|
|
|
if (get_owner () == me && update_shared_handlers () < 0)
|
|
|
|
api_fatal ("Can't update shared handlers, %E");
|
|
|
|
owner_unlock ();
|
|
|
|
}
|
2020-07-12 02:55:39 +08:00
|
|
|
fifo_client_unlock ();
|
|
|
|
if (cancel)
|
|
|
|
goto canceled;
|
2019-03-23 03:30:37 +08:00
|
|
|
}
|
2020-03-27 02:29:50 +08:00
|
|
|
canceled:
|
2020-04-30 06:53:05 +08:00
|
|
|
if (conn_evt)
|
|
|
|
NtClose (conn_evt);
|
2020-03-27 02:29:50 +08:00
|
|
|
/* automatically return the cygthread to the cygthread pool */
|
|
|
|
_my_tls._ctinfo->auto_release ();
|
2020-04-30 06:53:05 +08:00
|
|
|
return 0;
|
2019-03-23 03:30:37 +08:00
|
|
|
}
|
|
|
|
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* Return -1 on error and 0 or 1 on success. If ONLY_OPEN is true, we
|
|
|
|
expect the shared memory to exist, and we only try to open it. In
|
|
|
|
this case, we return 0 on success.
|
|
|
|
|
|
|
|
Otherwise, we create the shared memory if it doesn't exist, and we
|
|
|
|
return 1 if it already existed and we successfully open it. */
|
2020-03-18 00:29:56 +08:00
|
|
|
int
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
fhandler_fifo::create_shmem (bool only_open)
|
2020-03-18 00:29:56 +08:00
|
|
|
{
|
|
|
|
HANDLE sect;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
NTSTATUS status;
|
|
|
|
LARGE_INTEGER size = { .QuadPart = sizeof (fifo_shmem_t) };
|
|
|
|
SIZE_T viewsize = sizeof (fifo_shmem_t);
|
|
|
|
PVOID addr = NULL;
|
|
|
|
UNICODE_STRING uname;
|
|
|
|
WCHAR shmem_name[MAX_PATH];
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
bool already_exists = false;
|
2020-03-18 00:29:56 +08:00
|
|
|
|
|
|
|
__small_swprintf (shmem_name, L"fifo-shmem.%08x.%016X", get_dev (),
|
|
|
|
get_ino ());
|
|
|
|
RtlInitUnicodeString (&uname, shmem_name);
|
|
|
|
InitializeObjectAttributes (&attr, &uname, OBJ_INHERIT,
|
|
|
|
get_shared_parent_dir (), NULL);
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
if (!only_open)
|
|
|
|
{
|
|
|
|
status = NtCreateSection (§, STANDARD_RIGHTS_REQUIRED | SECTION_QUERY
|
|
|
|
| SECTION_MAP_READ | SECTION_MAP_WRITE,
|
|
|
|
&attr, &size, PAGE_READWRITE, SEC_COMMIT, NULL);
|
|
|
|
if (status == STATUS_OBJECT_NAME_COLLISION)
|
|
|
|
already_exists = true;
|
|
|
|
}
|
|
|
|
if (only_open || already_exists)
|
2020-03-18 00:29:56 +08:00
|
|
|
status = NtOpenSection (§, STANDARD_RIGHTS_REQUIRED | SECTION_QUERY
|
|
|
|
| SECTION_MAP_READ | SECTION_MAP_WRITE, &attr);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
status = NtMapViewOfSection (sect, NtCurrentProcess (), &addr, 0, viewsize,
|
|
|
|
NULL, &viewsize, ViewShare, 0, PAGE_READWRITE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
NtClose (sect);
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shmem_handle = sect;
|
|
|
|
shmem = (fifo_shmem_t *) addr;
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
return already_exists ? 1 : 0;
|
2020-03-18 00:29:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* shmem_handle must be valid when this is called. */
|
|
|
|
int
|
|
|
|
fhandler_fifo::reopen_shmem ()
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
SIZE_T viewsize = sizeof (fifo_shmem_t);
|
|
|
|
PVOID addr = NULL;
|
|
|
|
|
|
|
|
status = NtMapViewOfSection (shmem_handle, NtCurrentProcess (), &addr,
|
|
|
|
0, viewsize, NULL, &viewsize, ViewShare,
|
|
|
|
0, PAGE_READWRITE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shmem = (fifo_shmem_t *) addr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-24 06:43:42 +08:00
|
|
|
/* On first creation, map and commit one page of memory. */
|
|
|
|
int
|
|
|
|
fhandler_fifo::create_shared_fc_handler ()
|
|
|
|
{
|
|
|
|
HANDLE sect;
|
|
|
|
OBJECT_ATTRIBUTES attr;
|
|
|
|
NTSTATUS status;
|
|
|
|
LARGE_INTEGER size
|
|
|
|
= { .QuadPart = (LONGLONG) (SH_FC_HANDLER_PAGES * wincap.page_size ()) };
|
|
|
|
SIZE_T viewsize = get_shared_fc_handler_committed () ?: wincap.page_size ();
|
|
|
|
PVOID addr = NULL;
|
|
|
|
UNICODE_STRING uname;
|
|
|
|
WCHAR shared_fc_name[MAX_PATH];
|
|
|
|
|
|
|
|
__small_swprintf (shared_fc_name, L"fifo-shared-fc.%08x.%016X", get_dev (),
|
|
|
|
get_ino ());
|
|
|
|
RtlInitUnicodeString (&uname, shared_fc_name);
|
|
|
|
InitializeObjectAttributes (&attr, &uname, OBJ_INHERIT,
|
|
|
|
get_shared_parent_dir (), NULL);
|
|
|
|
status = NtCreateSection (§, STANDARD_RIGHTS_REQUIRED | SECTION_QUERY
|
|
|
|
| SECTION_MAP_READ | SECTION_MAP_WRITE, &attr,
|
|
|
|
&size, PAGE_READWRITE, SEC_RESERVE, NULL);
|
|
|
|
if (status == STATUS_OBJECT_NAME_COLLISION)
|
|
|
|
status = NtOpenSection (§, STANDARD_RIGHTS_REQUIRED | SECTION_QUERY
|
|
|
|
| SECTION_MAP_READ | SECTION_MAP_WRITE, &attr);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
status = NtMapViewOfSection (sect, NtCurrentProcess (), &addr, 0, viewsize,
|
|
|
|
NULL, &viewsize, ViewShare, 0, PAGE_READWRITE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
NtClose (sect);
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shared_fc_hdl = sect;
|
|
|
|
shared_fc_handler = (fifo_client_handler *) addr;
|
|
|
|
if (!get_shared_fc_handler_committed ())
|
|
|
|
set_shared_fc_handler_committed (viewsize);
|
|
|
|
set_shared_shandlers (viewsize / sizeof (fifo_client_handler));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* shared_fc_hdl must be valid when this is called. */
|
|
|
|
int
|
|
|
|
fhandler_fifo::reopen_shared_fc_handler ()
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
SIZE_T viewsize = get_shared_fc_handler_committed ();
|
|
|
|
PVOID addr = NULL;
|
|
|
|
|
|
|
|
status = NtMapViewOfSection (shared_fc_hdl, NtCurrentProcess (),
|
|
|
|
&addr, 0, viewsize, NULL, &viewsize,
|
|
|
|
ViewShare, 0, PAGE_READWRITE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shared_fc_handler = (fifo_client_handler *) addr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
fhandler_fifo::remap_shared_fc_handler (size_t nbytes)
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
SIZE_T viewsize = roundup2 (nbytes, wincap.page_size ());
|
|
|
|
PVOID addr = NULL;
|
|
|
|
|
|
|
|
if (viewsize > SH_FC_HANDLER_PAGES * wincap.page_size ())
|
|
|
|
{
|
|
|
|
set_errno (ENOMEM);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
NtUnmapViewOfSection (NtCurrentProcess (), shared_fc_handler);
|
|
|
|
status = NtMapViewOfSection (shared_fc_hdl, NtCurrentProcess (),
|
|
|
|
&addr, 0, viewsize, NULL, &viewsize,
|
|
|
|
ViewShare, 0, PAGE_READWRITE);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shared_fc_handler = (fifo_client_handler *) addr;
|
|
|
|
set_shared_fc_handler_committed (viewsize);
|
|
|
|
set_shared_shandlers (viewsize / sizeof (fc_handler[0]));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2003-09-20 08:31:13 +08:00
|
|
|
int
|
2007-07-08 01:00:33 +08:00
|
|
|
fhandler_fifo::open (int flags, mode_t)
|
2003-09-20 08:31:13 +08:00
|
|
|
{
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
int saved_errno = 0, shmem_res = 0;
|
2007-07-08 01:00:33 +08:00
|
|
|
|
2019-06-27 19:46:14 +08:00
|
|
|
if (flags & O_PATH)
|
2020-01-24 00:31:04 +08:00
|
|
|
return open_fs (flags);
|
2019-06-27 19:46:14 +08:00
|
|
|
|
2011-10-30 12:50:36 +08:00
|
|
|
/* Determine what we're doing with this fhandler: reading, writing, both */
|
|
|
|
switch (flags & O_ACCMODE)
|
2003-09-20 08:31:13 +08:00
|
|
|
{
|
2011-10-30 12:50:36 +08:00
|
|
|
case O_RDONLY:
|
2020-01-24 00:31:04 +08:00
|
|
|
reader = true;
|
2011-10-30 12:50:36 +08:00
|
|
|
break;
|
|
|
|
case O_WRONLY:
|
|
|
|
writer = true;
|
|
|
|
break;
|
|
|
|
case O_RDWR:
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
reader = writer = duplexer = true;
|
2011-10-30 12:50:36 +08:00
|
|
|
break;
|
|
|
|
default:
|
2007-07-08 01:00:33 +08:00
|
|
|
set_errno (EINVAL);
|
2020-03-18 02:14:47 +08:00
|
|
|
goto err;
|
2007-07-08 01:00:33 +08:00
|
|
|
}
|
2009-07-25 04:54:33 +08:00
|
|
|
|
2012-01-23 05:43:25 +08:00
|
|
|
debug_only_printf ("reader %d, writer %d, duplexer %d", reader, writer, duplexer);
|
2011-10-30 12:50:36 +08:00
|
|
|
set_flags (flags);
|
2019-03-26 07:06:10 +08:00
|
|
|
if (reader && !duplexer)
|
2019-03-23 03:30:37 +08:00
|
|
|
nohandle (true);
|
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
/* Create control events for this named pipe */
|
2011-10-30 12:50:36 +08:00
|
|
|
char char_sa_buf[1024];
|
|
|
|
LPSECURITY_ATTRIBUTES sa_buf;
|
|
|
|
sa_buf = sec_user_cloexec (flags & O_CLOEXEC, (PSECURITY_ATTRIBUTES) char_sa_buf,
|
|
|
|
cygheap->user.sid());
|
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
char npbuf[MAX_PATH];
|
|
|
|
__small_sprintf (npbuf, "r-event.%08x.%016X", get_dev (), get_ino ());
|
2020-03-18 02:14:47 +08:00
|
|
|
if (!(read_ready = CreateEvent (sa_buf, true, false, npbuf)))
|
2011-10-30 12:50:36 +08:00
|
|
|
{
|
2019-03-23 03:30:36 +08:00
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
2020-03-18 02:14:47 +08:00
|
|
|
__seterrno ();
|
|
|
|
goto err;
|
2011-10-30 12:50:36 +08:00
|
|
|
}
|
2019-03-23 03:30:36 +08:00
|
|
|
npbuf[0] = 'w';
|
2019-06-19 23:14:37 +08:00
|
|
|
if (!(write_ready = CreateEvent (sa_buf, true, false, npbuf)))
|
2007-07-08 01:00:33 +08:00
|
|
|
{
|
2019-03-23 03:30:36 +08:00
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
2020-03-18 02:14:47 +08:00
|
|
|
__seterrno ();
|
|
|
|
goto err_close_read_ready;
|
2011-10-30 12:50:36 +08:00
|
|
|
}
|
2020-04-26 21:38:46 +08:00
|
|
|
npbuf[0] = 'o';
|
|
|
|
if (!(writer_opening = CreateEvent (sa_buf, true, false, npbuf)))
|
|
|
|
{
|
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
2020-03-18 02:14:47 +08:00
|
|
|
__seterrno ();
|
|
|
|
goto err_close_write_ready;
|
2019-03-26 07:06:10 +08:00
|
|
|
}
|
|
|
|
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* If we're reading, create the shared memory and the shared
|
|
|
|
fc_handler memory, create some events, start the
|
|
|
|
fifo_reader_thread, signal read_ready, and wait for a writer. */
|
2019-03-23 03:30:36 +08:00
|
|
|
if (reader)
|
2011-10-30 12:50:36 +08:00
|
|
|
{
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* Create/open shared memory. */
|
|
|
|
if ((shmem_res = create_shmem ()) < 0)
|
2020-03-27 02:29:50 +08:00
|
|
|
goto err_close_writer_opening;
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
else if (shmem_res == 0)
|
|
|
|
debug_printf ("shmem created");
|
|
|
|
else
|
|
|
|
debug_printf ("shmem existed; ok if we're not the first reader");
|
2020-04-24 06:43:42 +08:00
|
|
|
if (create_shared_fc_handler () < 0)
|
|
|
|
goto err_close_shmem;
|
2020-04-24 21:05:12 +08:00
|
|
|
npbuf[0] = 'n';
|
|
|
|
if (!(owner_needed_evt = CreateEvent (sa_buf, true, false, npbuf)))
|
|
|
|
{
|
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
|
|
|
__seterrno ();
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
goto err_close_shared_fc_handler;
|
2020-04-24 21:05:12 +08:00
|
|
|
}
|
|
|
|
npbuf[0] = 'f';
|
|
|
|
if (!(owner_found_evt = CreateEvent (sa_buf, true, false, npbuf)))
|
|
|
|
{
|
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
|
|
|
__seterrno ();
|
|
|
|
goto err_close_owner_needed_evt;
|
|
|
|
}
|
2020-04-25 21:54:18 +08:00
|
|
|
npbuf[0] = 'u';
|
|
|
|
if (!(update_needed_evt = CreateEvent (sa_buf, false, false, npbuf)))
|
|
|
|
{
|
|
|
|
debug_printf ("CreateEvent for %s failed, %E", npbuf);
|
|
|
|
__seterrno ();
|
|
|
|
goto err_close_owner_found_evt;
|
|
|
|
}
|
2020-05-19 22:14:10 +08:00
|
|
|
if (!(cancel_evt = create_event ()))
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
goto err_close_update_needed_evt;
|
2020-05-19 22:14:10 +08:00
|
|
|
if (!(thr_sync_evt = create_event ()))
|
2020-03-27 02:29:50 +08:00
|
|
|
goto err_close_cancel_evt;
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
|
2020-03-26 07:22:10 +08:00
|
|
|
me.winpid = GetCurrentProcessId ();
|
|
|
|
me.fh = this;
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
nreaders_lock ();
|
|
|
|
if (inc_nreaders () == 1)
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
{
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* Reinitialize _sh_fc_handler_updated, which starts as 0. */
|
|
|
|
shared_fc_handler_updated (true);
|
|
|
|
set_owner (me);
|
Cygwin: FIFO: designate one reader as owner
Among all the open readers of a FIFO, one is declared to be the owner.
This is the only reader that listens for client connections, and it is
the only one that has an accurate fc_handler list.
Add shared data and methods for getting and setting the owner, as well
as a lock to prevent more than one reader from accessing these data
simultaneously.
Modify the fifo_reader_thread so that it checks the owner at the
beginning of its loop. If there is no owner, it takes ownership. If
there is an owner but it is a different reader, the thread just waits
to be canceled. Otherwise, it listens for client connections as
before.
Remove the 'first' argument from create_pipe_instance. It is not
needed, and it may be confusing in the future since only the owner
knows whether a pipe instance is the first.
When opening a reader, don't return until the fifo_reader_thread has
time to set an owner.
If the owner closes, indicate that there is no longer an owner.
Clear the child's fc_handler list in dup, and don't bother duplicating
the handles. The child never starts out as owner, so it can't use
those handles.
Do the same thing in fixup_after_fork in the close-on-exec case. In
the non-close-on-exec case, the child inherits an fc_handler list that
it can't use, but we can just leave it alone; the handles will be
closed when the child is closed.
2020-03-27 02:32:10 +08:00
|
|
|
}
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
new cygthread (fifo_reader_thread, this, "fifo_reader", thr_sync_evt);
|
|
|
|
SetEvent (read_ready);
|
|
|
|
nreaders_unlock ();
|
2020-03-18 02:14:47 +08:00
|
|
|
|
|
|
|
/* If we're a duplexer, we need a handle for writing. */
|
|
|
|
if (duplexer)
|
2019-04-20 23:41:12 +08:00
|
|
|
{
|
2020-03-18 02:14:47 +08:00
|
|
|
HANDLE ph = NULL;
|
|
|
|
NTSTATUS status;
|
|
|
|
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
nwriters_lock ();
|
|
|
|
inc_nwriters ();
|
|
|
|
SetEvent (write_ready);
|
|
|
|
nwriters_unlock ();
|
|
|
|
|
2020-03-18 02:14:47 +08:00
|
|
|
while (1)
|
|
|
|
{
|
|
|
|
status = open_pipe (ph);
|
|
|
|
if (NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
set_handle (ph);
|
|
|
|
set_pipe_non_blocking (ph, flags & O_NONBLOCK);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else if (status == STATUS_OBJECT_NAME_NOT_FOUND)
|
|
|
|
{
|
|
|
|
/* The pipe hasn't been created yet. */
|
|
|
|
yield ();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
__seterrno_from_nt_status (status);
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
nohandle (true);
|
2020-03-18 02:14:47 +08:00
|
|
|
goto err_close_reader;
|
|
|
|
}
|
|
|
|
}
|
2019-04-20 23:41:12 +08:00
|
|
|
}
|
Cygwin: FIFO: support opening multiple readers
Although we can have multiple readers open because of dup/fork/exec,
the current code does not support multiple readers opening a FIFO by
explicitly calling 'open'.
The main complication in supporting this is that when a blocking
reader tries to open and there's already one open, it has to check
whether there any writers open. It can't rely on the write_ready
event, whose state hasn't changed since the first writer opened.
To fix this, add two new named events, check_write_ready_evt and
write_ready_ok_evt, and a new method, check_write_ready().
The first event signals the owner's reader thread to call
check_write_ready(), which polls the fc_handler list to check for
connected writers. If it finds none, it checks to see if there's a
writer in the process and then sets/resets write_ready appropriately.
When check_write_ready() finishes it sets write_ready_ok_evt to signal
the reader that write_ready has been updated.
The polling is done via fifo_client_handler::pipe_state(). As long as
it's calling that function anyway, check_write_ready() updates the
state of each handler.
Also add a new lock to prevent a race if two readers are trying to
open simultaneously.
2020-05-06 23:31:39 +08:00
|
|
|
/* Not a duplexer; wait for a writer to connect if we're blocking. */
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
else if (!wait (write_ready))
|
|
|
|
goto err_close_reader;
|
2020-03-18 02:14:47 +08:00
|
|
|
goto success;
|
2011-10-30 12:50:36 +08:00
|
|
|
}
|
|
|
|
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless it is opened with O_NONBLOCK).  This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* If we're writing, wait for read_ready, connect to the pipe, open
|
|
|
|
the shared memory, and signal write_ready. */
|
2011-10-30 12:50:36 +08:00
|
|
|
if (writer)
|
|
|
|
{
|
2020-03-18 02:14:47 +08:00
|
|
|
NTSTATUS status;
|
|
|
|
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* Don't let a reader see EOF at this point. */
|
2020-04-26 21:38:46 +08:00
|
|
|
SetEvent (writer_opening);
|
2019-04-15 03:16:02 +08:00
|
|
|
while (1)
|
2019-03-23 03:30:37 +08:00
|
|
|
{
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
if (!wait (read_ready))
|
|
|
|
{
|
|
|
|
ResetEvent (writer_opening);
|
|
|
|
goto err_close_writer_opening;
|
|
|
|
}
|
2020-03-18 02:14:47 +08:00
|
|
|
status = open_pipe (get_handle ());
|
2019-04-15 03:16:02 +08:00
|
|
|
if (NT_SUCCESS (status))
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
goto writer_shmem;
|
2020-03-18 02:14:47 +08:00
|
|
|
else if (status == STATUS_OBJECT_NAME_NOT_FOUND)
|
2019-04-15 03:16:02 +08:00
|
|
|
{
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* The pipe hasn't been created yet or there's no longer
|
|
|
|
a reader open. */
|
2020-03-18 02:14:47 +08:00
|
|
|
yield ();
|
|
|
|
continue;
|
2019-04-15 03:16:02 +08:00
|
|
|
}
|
|
|
|
else if (STATUS_PIPE_NO_INSTANCE_AVAILABLE (status))
|
2020-03-18 02:14:47 +08:00
|
|
|
break;
|
2019-04-15 03:16:02 +08:00
|
|
|
else
|
|
|
|
{
|
|
|
|
debug_printf ("create of writer failed");
|
|
|
|
__seterrno_from_nt_status (status);
|
2020-04-26 21:38:46 +08:00
|
|
|
ResetEvent (writer_opening);
|
2020-03-18 02:14:47 +08:00
|
|
|
goto err_close_writer_opening;
|
2019-04-15 03:16:02 +08:00
|
|
|
}
|
2019-03-23 03:30:37 +08:00
|
|
|
}
|
2020-03-18 02:14:47 +08:00
|
|
|
|
|
|
|
/* We should get here only if the system is heavily loaded
|
|
|
|
and/or many writers are trying to connect simultaneously */
|
|
|
|
while (1)
|
2020-04-26 21:38:46 +08:00
|
|
|
{
|
2020-03-18 02:14:47 +08:00
|
|
|
if (!wait (read_ready))
|
|
|
|
{
|
|
|
|
ResetEvent (writer_opening);
|
|
|
|
goto err_close_writer_opening;
|
|
|
|
}
|
|
|
|
status = wait_open_pipe (get_handle ());
|
|
|
|
if (NT_SUCCESS (status))
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
goto writer_shmem;
|
2020-03-18 02:14:47 +08:00
|
|
|
else if (status == STATUS_IO_TIMEOUT)
|
|
|
|
continue;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
debug_printf ("create of writer failed");
|
|
|
|
__seterrno_from_nt_status (status);
|
|
|
|
ResetEvent (writer_opening);
|
|
|
|
goto err_close_writer_opening;
|
|
|
|
}
|
2020-04-26 21:38:46 +08:00
|
|
|
}
|
2003-09-20 08:31:13 +08:00
|
|
|
}
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
writer_shmem:
|
|
|
|
if (create_shmem (true) < 0)
|
|
|
|
goto err_close_writer_opening;
|
|
|
|
/* writer_success: */
|
2020-03-18 02:14:47 +08:00
|
|
|
set_pipe_non_blocking (get_handle (), flags & O_NONBLOCK);
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
nwriters_lock ();
|
|
|
|
inc_nwriters ();
|
2020-03-18 02:14:47 +08:00
|
|
|
SetEvent (write_ready);
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
ResetEvent (writer_opening);
|
|
|
|
nwriters_unlock ();
|
2020-03-18 02:14:47 +08:00
|
|
|
success:
|
|
|
|
return 1;
|
|
|
|
err_close_reader:
|
|
|
|
saved_errno = get_errno ();
|
|
|
|
close ();
|
|
|
|
set_errno (saved_errno);
|
|
|
|
return 0;
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
/* err_close_thr_sync_evt: */
|
|
|
|
/* NtClose (thr_sync_evt); */
|
2020-03-27 02:29:50 +08:00
|
|
|
err_close_cancel_evt:
|
|
|
|
NtClose (cancel_evt);
|
2020-04-25 21:54:18 +08:00
|
|
|
err_close_update_needed_evt:
|
|
|
|
NtClose (update_needed_evt);
|
2020-04-24 21:05:12 +08:00
|
|
|
err_close_owner_found_evt:
|
|
|
|
NtClose (owner_found_evt);
|
|
|
|
err_close_owner_needed_evt:
|
|
|
|
NtClose (owner_needed_evt);
|
Cygwin: FIFO: keep a writer count in shared memory
When a reader opens, it needs to block if there are no writers open
(unless is is opened with O_NONBLOCK). This is easy for the first
reader to test, since it can just wait for a writer to signal that it
is open (via the write_ready event). But when a second reader wants
to open, all writers might have closed.
To check this, use a new '_nwriters' member of struct fifo_shmem_t,
which keeps track of the number of open writers. This should be more
reliable than the previous method.
Add nwriters_lock to control access to shmem->_nwriters, and remove
reader_opening_lock, which is no longer needed.
Previously only readers had access to the shared memory, but now
writers access it too so that they can increment _nwriters during
open/dup/fork/exec and decrement it during close.
Add an optional 'only_open' argument to create_shmem for use by
writers, which only open the shared memory rather than first trying to
create it. Since writers don't need to access the shared memory until
they have successfully connected to a pipe instance, they can safely
assume that a reader has already created the shared memory.
For debugging purposes, change create_shmem to return 1 instead of 0
when a reader successfully opens the shared memory after finding that
it had already been created.
Remove check_write_ready_evt, write_ready_ok_evt, and
check_write_ready(), which are no longer needed.
When opening a writer and looping to try to get a connection, recheck
read_ready at the top of the loop since the number of readers might
have changed.
To slightly speed up the process of opening the first reader, take
ownership immediately rather than waiting for the fifo_reader_thread
to handle it.
2020-07-12 02:05:23 +08:00
|
|
|
err_close_shared_fc_handler:
|
2020-04-24 06:43:42 +08:00
|
|
|
NtUnmapViewOfSection (NtCurrentProcess (), shared_fc_handler);
|
|
|
|
NtClose (shared_fc_hdl);
|
|
|
|
err_close_shmem:
|
2020-03-18 00:29:56 +08:00
|
|
|
NtUnmapViewOfSection (NtCurrentProcess (), shmem);
|
|
|
|
NtClose (shmem_handle);
|
2020-03-18 02:14:47 +08:00
|
|
|
err_close_writer_opening:
|
|
|
|
NtClose (writer_opening);
|
|
|
|
err_close_write_ready:
|
|
|
|
NtClose (write_ready);
|
|
|
|
err_close_read_ready:
|
|
|
|
NtClose (read_ready);
|
|
|
|
err:
|
|
|
|
if (get_handle ())
|
|
|
|
NtClose (get_handle ());
|
|
|
|
return 0;
|
2003-09-20 08:31:13 +08:00
|
|
|
}
|
|
|
|
|
2018-10-10 19:20:04 +08:00
|
|
|
/* Seeking on a FIFO is meaningless; POSIX requires ESPIPE. */
off_t
fhandler_fifo::lseek (off_t offset, int whence)
{
  debug_printf ("(%D, %d)", offset, whence);
  set_errno (ESPIPE);
  return -1;
}
|
|
|
|
|
2007-07-08 01:00:33 +08:00
|
|
|
bool
|
2011-10-30 12:50:36 +08:00
|
|
|
fhandler_fifo::wait (HANDLE h)
|
2003-09-20 08:31:13 +08:00
|
|
|
{
|
2011-10-30 12:50:36 +08:00
|
|
|
#ifdef DEBUGGING
|
|
|
|
const char *what;
|
|
|
|
if (h == read_ready)
|
|
|
|
what = "reader";
|
|
|
|
else
|
2019-03-23 03:30:36 +08:00
|
|
|
what = "writer";
|
2011-10-30 12:50:36 +08:00
|
|
|
#endif
|
|
|
|
/* Set the wait to zero for non-blocking I/O-related events. */
|
|
|
|
DWORD wait = ((h == read_ready || h == write_ready)
|
|
|
|
&& get_flags () & O_NONBLOCK) ? 0 : INFINITE;
|
|
|
|
|
|
|
|
debug_only_printf ("waiting for %s", what);
|
|
|
|
/* Wait for the event. Set errno, as appropriate if something goes wrong. */
|
2011-12-10 00:02:56 +08:00
|
|
|
switch (cygwait (h, wait))
|
2006-06-23 08:19:39 +08:00
|
|
|
{
|
2011-10-30 12:50:36 +08:00
|
|
|
case WAIT_OBJECT_0:
|
|
|
|
debug_only_printf ("successfully waited for %s", what);
|
|
|
|
return true;
|
2012-06-18 04:50:24 +08:00
|
|
|
case WAIT_SIGNALED:
|
|
|
|
debug_only_printf ("interrupted by signal while waiting for %s", what);
|
|
|
|
set_errno (EINTR);
|
|
|
|
return false;
|
|
|
|
case WAIT_CANCELED:
|
|
|
|
debug_only_printf ("cancellable interruption while waiting for %s", what);
|
|
|
|
pthread::static_cancel_self (); /* never returns */
|
|
|
|
break;
|
2011-10-30 12:50:36 +08:00
|
|
|
case WAIT_TIMEOUT:
|
|
|
|
if (h == write_ready)
|
2009-07-25 04:54:33 +08:00
|
|
|
{
|
2011-10-30 12:50:36 +08:00
|
|
|
debug_only_printf ("wait timed out waiting for write but will still open reader since non-blocking mode");
|
|
|
|
return true;
|
2009-07-25 04:54:33 +08:00
|
|
|
}
|
2011-10-30 12:50:36 +08:00
|
|
|
else
|
2009-07-25 04:54:33 +08:00
|
|
|
{
|
2011-10-30 12:50:36 +08:00
|
|
|
set_errno (ENXIO);
|
2009-07-25 04:54:33 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
2011-10-30 12:50:36 +08:00
|
|
|
default:
|
|
|
|
debug_only_printf ("unknown error while waiting for %s", what);
|
|
|
|
__seterrno ();
|
|
|
|
return false;
|
|
|
|
}
|
2007-07-08 01:00:33 +08:00
|
|
|
}
|
2006-06-23 08:19:39 +08:00
|
|
|
|
2019-03-23 03:30:36 +08:00
|
|
|
/* Write LEN bytes from PTR to the pipe in chunks of at most
   max_atomic_write bytes.  Returns the number of bytes written, or -1
   (with errno set) if the very first chunk fails.  A short write is
   returned if a later chunk fails.  In blocking mode an event is used
   so that a pending NtWriteFile can be interrupted by signals or
   thread cancellation. */
ssize_t __reg3
fhandler_fifo::raw_write (const void *ptr, size_t len)
{
  ssize_t ret = -1;
  size_t nbytes = 0;
  ULONG chunk;
  NTSTATUS status = STATUS_SUCCESS;
  IO_STATUS_BLOCK io;
  HANDLE evt = NULL;

  if (!len)
    return 0;

  if (len <= max_atomic_write)
    chunk = len;
  else if (is_nonblocking ())
    /* Non-blocking: write at most one atomic chunk; clamp len so the
       loop makes a single pass. */
    chunk = len = max_atomic_write;
  else
    chunk = max_atomic_write;

  /* Create a wait event if the FIFO is in blocking mode. */
  if (!is_nonblocking () && !(evt = CreateEvent (NULL, false, false, NULL)))
    {
      __seterrno ();
      return -1;
    }

  /* Write in chunks, accumulating a total.  If there's an error, just
     return the accumulated total unless the first write fails, in
     which case return -1. */
  while (nbytes < len)
    {
      ULONG_PTR nbytes_now = 0;
      size_t left = len - nbytes;
      ULONG len1;
      DWORD waitret = WAIT_OBJECT_0;

      if (left > chunk)
	len1 = chunk;
      else
	len1 = (ULONG) left;
      nbytes_now = 0;
      status = NtWriteFile (get_handle (), evt, NULL, NULL, &io,
			    (PVOID) ptr, len1, NULL, NULL);
      if (evt && status == STATUS_PENDING)
	{
	  /* Blocking mode: wait interruptibly for the I/O to finish. */
	  waitret = cygwait (evt);
	  if (waitret == WAIT_OBJECT_0)
	    status = io.Status;
	}
      if (waitret == WAIT_CANCELED)
	status = STATUS_THREAD_CANCELED;
      else if (waitret == WAIT_SIGNALED)
	status = STATUS_THREAD_SIGNALED;
      else if (isclosed ())  /* A signal handler might have closed the fd. */
	{
	  if (waitret == WAIT_OBJECT_0)
	    set_errno (EBADF);
	  else
	    __seterrno ();
	}
      else if (NT_SUCCESS (status))
	{
	  nbytes_now = io.Information;
	  /* NtWriteFile returns success with # of bytes written == 0
	     if writing on a non-blocking pipe fails because the pipe
	     buffer doesn't have sufficient space. */
	  if (nbytes_now == 0)
	    set_errno (EAGAIN);
	  ptr = ((char *) ptr) + chunk;
	  nbytes += nbytes_now;
	}
      else if (STATUS_PIPE_IS_CLOSED (status))
	{
	  /* All readers are gone. */
	  set_errno (EPIPE);
	  raise (SIGPIPE);
	}
      else
	__seterrno_from_nt_status (status);
      if (nbytes_now == 0)
	len = 0;		/* Terminate loop. */
      if (nbytes > 0)
	ret = nbytes;
    }
  if (evt)
    NtClose (evt);
  /* Only report EINTR if nothing was written; otherwise return the
     partial count. */
  if (status == STATUS_THREAD_SIGNALED && ret < 0)
    set_errno (EINTR);
  else if (status == STATUS_THREAD_CANCELED)
    pthread::static_cancel_self ();
  return ret;
}
|
|
|
|
|
2020-08-03 21:17:06 +08:00
|
|
|
/* Called from raw_read and select.cc:peek_fifo. */
|
|
|
|
int
|
|
|
|
fhandler_fifo::take_ownership (DWORD timeout)
|
2020-04-25 21:54:18 +08:00
|
|
|
{
|
2020-08-03 21:17:06 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
2020-04-25 21:54:18 +08:00
|
|
|
owner_lock ();
|
|
|
|
if (get_owner () == me)
|
|
|
|
{
|
|
|
|
owner_unlock ();
|
2020-08-03 21:17:06 +08:00
|
|
|
return 0;
|
2020-04-25 21:54:18 +08:00
|
|
|
}
|
|
|
|
set_pending_owner (me);
|
2020-07-13 06:11:38 +08:00
|
|
|
/* Wake up my fifo_reader_thread. */
|
2020-04-25 21:54:18 +08:00
|
|
|
owner_needed ();
|
2020-07-13 06:11:38 +08:00
|
|
|
if (get_owner ())
|
2020-08-03 04:38:24 +08:00
|
|
|
/* Wake up the owner and request an update of the shared fc_handlers. */
|
2020-07-13 06:11:38 +08:00
|
|
|
SetEvent (update_needed_evt);
|
2020-04-25 21:54:18 +08:00
|
|
|
owner_unlock ();
|
2020-07-13 19:01:57 +08:00
|
|
|
/* The reader threads should now do the transfer. */
|
2020-08-03 21:17:06 +08:00
|
|
|
switch (WaitForSingleObject (owner_found_evt, timeout))
|
2020-07-13 19:01:57 +08:00
|
|
|
{
|
2020-08-03 21:17:06 +08:00
|
|
|
case WAIT_OBJECT_0:
|
|
|
|
owner_lock ();
|
|
|
|
if (get_owner () != me)
|
|
|
|
{
|
|
|
|
debug_printf ("owner_found_evt signaled, but I'm not the owner");
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
owner_unlock ();
|
|
|
|
break;
|
|
|
|
case WAIT_TIMEOUT:
|
|
|
|
debug_printf ("timed out");
|
|
|
|
ret = -1;
|
2020-08-06 03:46:53 +08:00
|
|
|
break;
|
2020-08-03 21:17:06 +08:00
|
|
|
default:
|
|
|
|
debug_printf ("WFSO failed, %E");
|
|
|
|
ret = -1;
|
2020-08-06 03:46:53 +08:00
|
|
|
break;
|
2020-07-13 19:01:57 +08:00
|
|
|
}
|
2020-08-03 21:17:06 +08:00
|
|
|
return ret;
|
2020-04-25 21:54:18 +08:00
|
|
|
}
|
|
|
|
|
2013-05-01 09:20:37 +08:00
|
|
|
/* Read up to LEN bytes from the FIFO into IN_PTR.  On return, LEN is
   the number of bytes read, 0 on EOF, or (size_t) -1 on error (with
   errno set).  Only the owning reader polls the writers' pipe
   instances; we first take ownership, then scan the fc_handler list
   in three passes under fifo_client_lock.  Blocking opens loop with a
   1 ms interruptible wait between polls.  */
void __reg3
fhandler_fifo::raw_read (void *in_ptr, size_t& len)
{
  /* POSIX: a zero-length read returns immediately with no effect.  */
  if (!len)
    return;

  while (1)
    {
      /* Number of clients still connected during the third pass; if it
	 drops to zero and hit_eof () agrees, we report EOF.  */
      int nconnected = 0;

      /* No one else can take ownership while we hold the reading_lock. */
      reading_lock ();
      if (take_ownership (10) < 0)
	/* Couldn't become owner; release the lock and retry/block below.  */
	goto maybe_retry;

      fifo_client_lock ();
      /* Poll the connected clients for input.  Make three passes.

	 On the first pass, just try to read from the client from
	 which we last read successfully.  This should minimize
	 interleaving of writes from different clients.

	 On the second pass, just try to read from the clients in the
	 state fc_input_avail.  This should be more efficient if
	 select has been called and detected input available.

	 On the third pass, try to read from all connected clients.  */

      /* First pass: find the client marked last_read, if any.  */
      int j;
      for (j = 0; j < nhandlers; j++)
	if (fc_handler[j].last_read)
	  break;
      /* If that client is no longer connected, drop its last_read flag
	 and skip the first pass (j == nhandlers means "none").  */
      if (j < nhandlers && fc_handler[j].get_state () < fc_connected)
	{
	  fc_handler[j].last_read = false;
	  j = nhandlers;
	}
      if (j < nhandlers)
	{
	  NTSTATUS status;
	  IO_STATUS_BLOCK io;

	  status = NtReadFile (fc_handler[j].h, NULL, NULL, NULL,
			       &io, in_ptr, len, NULL, NULL);
	  switch (status)
	    {
	    case STATUS_SUCCESS:
	    case STATUS_BUFFER_OVERFLOW:
	      /* io.Information is supposedly valid in latter case. */
	      if (io.Information > 0)
		{
		  len = io.Information;
		  fifo_client_unlock ();
		  reading_unlock ();
		  return;
		}
	      break;
	    case STATUS_PIPE_EMPTY:
	      /* Update state in case it's fc_input_avail. */
	      fc_handler[j].set_state (fc_connected);
	      break;
	    case STATUS_PIPE_BROKEN:
	      /* Writer went away; remember that.  */
	      fc_handler[j].set_state (fc_disconnected);
	      break;
	    default:
	      debug_printf ("NtReadFile status %y", status);
	      fc_handler[j].set_state (fc_error);
	      break;
	    }
	}

      /* Second pass: clients already known to have input available.  */
      for (int i = 0; i < nhandlers; i++)
	if (fc_handler[i].get_state () == fc_input_avail)
	  {
	    NTSTATUS status;
	    IO_STATUS_BLOCK io;

	    status = NtReadFile (fc_handler[i].h, NULL, NULL, NULL,
				 &io, in_ptr, len, NULL, NULL);
	    switch (status)
	      {
	      case STATUS_SUCCESS:
	      case STATUS_BUFFER_OVERFLOW:
		if (io.Information > 0)
		  {
		    len = io.Information;
		    /* Move the last_read mark to the client we just
		       read from.  */
		    if (j < nhandlers)
		      fc_handler[j].last_read = false;
		    fc_handler[i].last_read = true;
		    fifo_client_unlock ();
		    reading_unlock ();
		    return;
		  }
		break;
	      case STATUS_PIPE_EMPTY:
		/* No input available after all. */
		fc_handler[i].set_state (fc_connected);
		break;
	      case STATUS_PIPE_BROKEN:
		fc_handler[i].set_state (fc_disconnected);
		break;
	      default:
		debug_printf ("NtReadFile status %y", status);
		fc_handler[i].set_state (fc_error);
		break;
	      }
	  }

      /* Third pass: every client still at least connected.  */
      for (int i = 0; i < nhandlers; i++)
	if (fc_handler[i].get_state () >= fc_connected)
	  {
	    NTSTATUS status;
	    IO_STATUS_BLOCK io;

	    nconnected++;
	    status = NtReadFile (fc_handler[i].h, NULL, NULL, NULL,
				 &io, in_ptr, len, NULL, NULL);
	    switch (status)
	      {
	      case STATUS_SUCCESS:
	      case STATUS_BUFFER_OVERFLOW:
		if (io.Information > 0)
		  {
		    len = io.Information;
		    if (j < nhandlers)
		      fc_handler[j].last_read = false;
		    fc_handler[i].last_read = true;
		    fifo_client_unlock ();
		    reading_unlock ();
		    return;
		  }
		break;
	      case STATUS_PIPE_EMPTY:
		break;
	      case STATUS_PIPE_BROKEN:
		/* This client no longer counts as connected.  */
		fc_handler[i].set_state (fc_disconnected);
		nconnected--;
		break;
	      default:
		debug_printf ("NtReadFile status %y", status);
		fc_handler[i].set_state (fc_error);
		nconnected--;
		break;
	      }
	  }
      fifo_client_unlock ();
      /* All writers gone and EOF condition satisfied: report EOF.  */
      if (!nconnected && hit_eof ())
	{
	  reading_unlock ();
	  len = 0;
	  return;
	}
maybe_retry:
      reading_unlock ();
      if (is_nonblocking ())
	{
	  set_errno (EAGAIN);
	  goto errout;
	}
      else
	{
	  /* Allow interruption and don't hog the CPU. */
	  DWORD waitret = cygwait (NULL, 1, cw_cancel | cw_sig_eintr);
	  if (waitret == WAIT_CANCELED)
	    pthread::static_cancel_self ();
	  else if (waitret == WAIT_SIGNALED)
	    {
	      /* If the handler was called and returned, restart the poll;
		 otherwise fail with EINTR per POSIX.  */
	      if (_my_tls.call_signal_handler ())
		continue;
	      else
		{
		  set_errno (EINTR);
		  goto errout;
		}
	    }
	}
      /* We might have been closed by a signal handler or another thread. */
      if (isclosed ())
	{
	  set_errno (EBADF);
	  goto errout;
	}
    }
errout:
  /* Error convention for raw_read: len == (size_t) -1, errno set.  */
  len = (size_t) -1;
}
|
2007-02-27 20:58:56 +08:00
|
|
|
|
2013-01-21 12:34:52 +08:00
|
|
|
int __reg2
|
2007-02-27 20:58:56 +08:00
|
|
|
fhandler_fifo::fstatvfs (struct statvfs *sfs)
|
|
|
|
{
|
2020-01-25 04:09:03 +08:00
|
|
|
if (get_flags () & O_PATH)
|
|
|
|
/* We already have a handle. */
|
|
|
|
{
|
|
|
|
HANDLE h = get_handle ();
|
|
|
|
if (h)
|
|
|
|
return fstatvfs_by_handle (h, sfs);
|
|
|
|
}
|
|
|
|
|
2007-12-04 21:29:44 +08:00
|
|
|
fhandler_disk_file fh (pc);
|
|
|
|
fh.get_device () = FH_FS;
|
|
|
|
return fh.fstatvfs (sfs);
|
2007-02-27 20:58:56 +08:00
|
|
|
}
|
2009-07-25 04:54:33 +08:00
|
|
|
|
2020-04-24 06:43:42 +08:00
|
|
|
void
|
|
|
|
fhandler_fifo::close_all_handlers ()
|
|
|
|
{
|
2020-08-01 01:55:17 +08:00
|
|
|
fifo_client_lock ();
|
2020-04-24 06:43:42 +08:00
|
|
|
for (int i = 0; i < nhandlers; i++)
|
|
|
|
fc_handler[i].close ();
|
|
|
|
nhandlers = 0;
|
2020-08-01 01:55:17 +08:00
|
|
|
fifo_client_unlock ();
|
2020-04-24 06:43:42 +08:00
|
|
|
}
|
|
|
|
|
2020-08-03 21:32:30 +08:00
|
|
|
/* Return previous state. */
|
2020-05-10 05:25:39 +08:00
|
|
|
fifo_client_connect_state
|
2020-08-03 21:32:30 +08:00
|
|
|
fifo_client_handler::query_and_set_state ()
|
2019-06-22 06:49:11 +08:00
|
|
|
{
|
|
|
|
IO_STATUS_BLOCK io;
|
|
|
|
FILE_PIPE_LOCAL_INFORMATION fpli;
|
|
|
|
NTSTATUS status;
|
2020-08-03 21:32:30 +08:00
|
|
|
fifo_client_connect_state prev_state = get_state ();
|
2019-06-22 06:49:11 +08:00
|
|
|
|
2020-05-10 05:25:39 +08:00
|
|
|
if (!h)
|
2020-08-03 21:32:30 +08:00
|
|
|
{
|
|
|
|
set_state (fc_unknown);
|
|
|
|
goto out;
|
|
|
|
}
|
2020-05-10 05:25:39 +08:00
|
|
|
|
2020-03-17 06:04:28 +08:00
|
|
|
status = NtQueryInformationFile (h, &io, &fpli,
|
2019-06-22 06:49:11 +08:00
|
|
|
sizeof (fpli), FilePipeLocalInformation);
|
|
|
|
if (!NT_SUCCESS (status))
|
|
|
|
{
|
|
|
|
debug_printf ("NtQueryInformationFile status %y", status);
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_error);
|
2019-06-22 06:49:11 +08:00
|
|
|
}
|
|
|
|
else if (fpli.ReadDataAvailable > 0)
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_input_avail);
|
2019-06-22 06:49:11 +08:00
|
|
|
else
|
2020-05-10 05:25:39 +08:00
|
|
|
switch (fpli.NamedPipeState)
|
|
|
|
{
|
|
|
|
case FILE_PIPE_DISCONNECTED_STATE:
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_disconnected);
|
2020-05-10 05:25:39 +08:00
|
|
|
break;
|
|
|
|
case FILE_PIPE_LISTENING_STATE:
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_listening);
|
2020-05-10 05:25:39 +08:00
|
|
|
break;
|
|
|
|
case FILE_PIPE_CONNECTED_STATE:
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_connected);
|
2020-05-10 05:25:39 +08:00
|
|
|
break;
|
|
|
|
case FILE_PIPE_CLOSING_STATE:
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_closing);
|
2020-05-10 05:25:39 +08:00
|
|
|
break;
|
|
|
|
default:
|
2020-08-03 21:32:30 +08:00
|
|
|
set_state (fc_error);
|
2020-05-10 05:25:39 +08:00
|
|
|
break;
|
|
|
|
}
|
2020-08-03 21:32:30 +08:00
|
|
|
out:
|
|
|
|
return prev_state;
|
2019-06-22 06:49:11 +08:00
|
|
|
}
|
|
|
|
|
2020-04-30 06:53:05 +08:00
|
|
|
void
|
2020-03-27 02:29:50 +08:00
|
|
|
fhandler_fifo::cancel_reader_thread ()
|
2009-07-25 04:54:33 +08:00
|
|
|
{
|
2020-03-27 02:29:50 +08:00
|
|
|
if (cancel_evt)
|
|
|
|
SetEvent (cancel_evt);
|
|
|
|
if (thr_sync_evt)
|
|
|
|
WaitForSingleObject (thr_sync_evt, INFINITE);
|
2019-04-15 03:16:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the FIFO.  Writers decrement the shared writer count; readers
   shut down their reader thread and, if they were the owner, hand
   ownership off to another reader before closing their fc_handler
   list.  Finally all per-descriptor events and shared-memory views
   are released.  Returns 0 on success, -1 on failure.  */
int
fhandler_fifo::close ()
{
  if (writer)
    {
      nwriters_lock ();
      /* Last writer gone: readers must no longer see write_ready.  */
      if (dec_nwriters () == 0)
	ResetEvent (write_ready);
      nwriters_unlock ();
    }
  if (reader)
    {
      /* If we're the owner, we can't close our fc_handlers if a new
	 owner might need to duplicate them. */
      bool close_fc_ok = false;

      cancel_reader_thread ();
      nreaders_lock ();
      if (dec_nreaders () == 0)
	{
	  /* We were the last reader; reset all shared reader state.  */
	  close_fc_ok = true;
	  ResetEvent (read_ready);
	  ResetEvent (owner_needed_evt);
	  ResetEvent (owner_found_evt);
	  set_owner (null_fr_id);
	  set_prev_owner (null_fr_id);
	  set_pending_owner (null_fr_id);
	  set_shared_nhandlers (0);
	}
      else
	{
	  owner_lock ();
	  if (get_owner () != me)
	    /* Not the owner; no one needs our fc_handlers.  */
	    close_fc_ok = true;
	  else
	    {
	      /* We are the owner: give up ownership and, unless a
		 successor is already pending, announce that a new
		 owner is needed.  */
	      set_owner (null_fr_id);
	      set_prev_owner (me);
	      if (!get_pending_owner ())
		owner_needed ();
	    }
	  owner_unlock ();
	}
      nreaders_unlock ();
      /* Former owner: wait until a new owner has duplicated our
	 fc_handlers (owner_found_evt), or until it's clear no one
	 will need them.  */
      while (!close_fc_ok)
	{
	  if (WaitForSingleObject (owner_found_evt, 1) == WAIT_OBJECT_0)
	    close_fc_ok = true;
	  else
	    {
	      nreaders_lock ();
	      if (!nreaders ())
		{
		  /* No readers left at all; safe to close.  */
		  close_fc_ok = true;
		  nreaders_unlock ();
		}
	      else
		{
		  nreaders_unlock ();
		  owner_lock ();
		  /* Someone else took over, or we're no longer the
		     handoff source; our handles aren't needed.  */
		  if (get_owner () || get_prev_owner () != me)
		    close_fc_ok = true;
		  owner_unlock ();
		}
	    }
	}
      close_all_handlers ();
      if (fc_handler)
	free (fc_handler);
      if (owner_needed_evt)
	NtClose (owner_needed_evt);
      if (owner_found_evt)
	NtClose (owner_found_evt);
      if (update_needed_evt)
	NtClose (update_needed_evt);
      if (cancel_evt)
	NtClose (cancel_evt);
      if (thr_sync_evt)
	NtClose (thr_sync_evt);
      if (shared_fc_handler)
	NtUnmapViewOfSection (NtCurrentProcess (), shared_fc_handler);
      if (shared_fc_hdl)
	NtClose (shared_fc_hdl);
    }
  /* Resources common to readers and writers.  */
  if (shmem)
    NtUnmapViewOfSection (NtCurrentProcess (), shmem);
  if (shmem_handle)
    NtClose (shmem_handle);
  if (read_ready)
    NtClose (read_ready);
  if (write_ready)
    NtClose (write_ready);
  if (writer_opening)
    NtClose (writer_opening);
  if (nohandle ())
    return 0;
  else
    return fhandler_base::close ();
}
|
|
|
|
|
2019-05-09 23:23:44 +08:00
|
|
|
/* If we have a write handle (i.e., we're a duplexer or a writer),
|
|
|
|
keep the nonblocking state of the windows pipe in sync with our
|
|
|
|
nonblocking state. */
|
2019-04-26 06:21:11 +08:00
|
|
|
int
|
|
|
|
fhandler_fifo::fcntl (int cmd, intptr_t arg)
|
|
|
|
{
|
2020-01-24 00:31:05 +08:00
|
|
|
if (cmd != F_SETFL || nohandle () || (get_flags () & O_PATH))
|
2019-04-26 06:21:11 +08:00
|
|
|
return fhandler_base::fcntl (cmd, arg);
|
|
|
|
|
|
|
|
const bool was_nonblocking = is_nonblocking ();
|
|
|
|
int res = fhandler_base::fcntl (cmd, arg);
|
|
|
|
const bool now_nonblocking = is_nonblocking ();
|
|
|
|
if (now_nonblocking != was_nonblocking)
|
|
|
|
set_pipe_non_blocking (get_handle (), now_nonblocking);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2009-07-25 04:54:33 +08:00
|
|
|
/* Duplicate this FIFO descriptor into CHILD.  All per-descriptor
   Windows handles (events, shared-memory sections) are duplicated,
   and the shared-memory views are remapped in the child.  A reader
   child gets a fresh reader thread and an empty fc_handler list (it
   never starts as owner).  The error paths unwind in strict reverse
   order of acquisition via the goto chain below.  Returns 0 on
   success, -1 on failure.  */
int
fhandler_fifo::dup (fhandler_base *child, int flags)
{
  fhandler_fifo *fhf = NULL;

  /* O_PATH descriptors carry none of the FIFO machinery.  */
  if (get_flags () & O_PATH)
    return fhandler_base::dup (child, flags);

  if (fhandler_base::dup (child, flags))
    goto err;

  fhf = (fhandler_fifo *) child;
  /* The duplicated handles are inheritable unless O_CLOEXEC.  */
  if (!DuplicateHandle (GetCurrentProcess (), read_ready,
			GetCurrentProcess (), &fhf->read_ready,
			0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
    {
      __seterrno ();
      goto err;
    }
  if (!DuplicateHandle (GetCurrentProcess (), write_ready,
			GetCurrentProcess (), &fhf->write_ready,
			0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
    {
      __seterrno ();
      goto err_close_read_ready;
    }
  if (!DuplicateHandle (GetCurrentProcess (), writer_opening,
			GetCurrentProcess (), &fhf->writer_opening,
			0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
    {
      __seterrno ();
      goto err_close_write_ready;
    }
  if (!DuplicateHandle (GetCurrentProcess (), shmem_handle,
			GetCurrentProcess (), &fhf->shmem_handle,
			0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
    {
      __seterrno ();
      goto err_close_writer_opening;
    }
  /* The child needs its own view of the shared memory.  */
  if (fhf->reopen_shmem () < 0)
    goto err_close_shmem_handle;
  if (reader)
    {
      /* Make sure the child starts unlocked. */
      fhf->fifo_client_unlock ();

      /* Clear fc_handler list; the child never starts as owner. */
      fhf->nhandlers = fhf->shandlers = 0;
      fhf->fc_handler = NULL;

      if (!DuplicateHandle (GetCurrentProcess (), shared_fc_hdl,
			    GetCurrentProcess (), &fhf->shared_fc_hdl,
			    0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
	{
	  __seterrno ();
	  goto err_close_shmem;
	}
      if (fhf->reopen_shared_fc_handler () < 0)
	goto err_close_shared_fc_hdl;
      if (!DuplicateHandle (GetCurrentProcess (), owner_needed_evt,
			    GetCurrentProcess (), &fhf->owner_needed_evt,
			    0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
	{
	  __seterrno ();
	  goto err_close_shared_fc_handler;
	}
      if (!DuplicateHandle (GetCurrentProcess (), owner_found_evt,
			    GetCurrentProcess (), &fhf->owner_found_evt,
			    0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
	{
	  __seterrno ();
	  goto err_close_owner_needed_evt;
	}
      if (!DuplicateHandle (GetCurrentProcess (), update_needed_evt,
			    GetCurrentProcess (), &fhf->update_needed_evt,
			    0, !(flags & O_CLOEXEC), DUPLICATE_SAME_ACCESS))
	{
	  __seterrno ();
	  goto err_close_owner_found_evt;
	}
      /* The cancel/sync events are private to the child's reader
	 thread and therefore created fresh, not duplicated.  */
      if (!(fhf->cancel_evt = create_event ()))
	goto err_close_update_needed_evt;
      if (!(fhf->thr_sync_evt = create_event ()))
	goto err_close_cancel_evt;
      inc_nreaders ();
      fhf->me.fh = fhf;
      new cygthread (fifo_reader_thread, fhf, "fifo_reader", fhf->thr_sync_evt);
    }
  if (writer)
    inc_nwriters ();
  return 0;
/* Error unwind: close/unmap everything acquired so far, newest first.  */
err_close_cancel_evt:
  NtClose (fhf->cancel_evt);
err_close_update_needed_evt:
  NtClose (fhf->update_needed_evt);
err_close_owner_found_evt:
  NtClose (fhf->owner_found_evt);
err_close_owner_needed_evt:
  NtClose (fhf->owner_needed_evt);
err_close_shared_fc_handler:
  NtUnmapViewOfSection (GetCurrentProcess (), fhf->shared_fc_handler);
err_close_shared_fc_hdl:
  NtClose (fhf->shared_fc_hdl);
err_close_shmem:
  NtUnmapViewOfSection (GetCurrentProcess (), fhf->shmem);
err_close_shmem_handle:
  NtClose (fhf->shmem_handle);
err_close_writer_opening:
  NtClose (fhf->writer_opening);
err_close_write_ready:
  NtClose (fhf->write_ready);
err_close_read_ready:
  NtClose (fhf->read_ready);
err:
  return -1;
}
|
|
|
|
|
|
|
|
/* Called in the child process after fork.  Re-establish all handles
   and shared-memory mappings that the child needs: handles are
   duplicated from the parent via fork_fixup, and the shared memory
   views are remapped in the child's address space.  If this fhandler
   is a reader, the child also becomes a new reader: it gets fresh
   thread-control events and starts its own fifo_reader_thread.  */
void
fhandler_fifo::fixup_after_fork (HANDLE parent)
{
  fhandler_base::fixup_after_fork (parent);
  /* Duplicate the per-FIFO event handles from the parent process.  */
  fork_fixup (parent, read_ready, "read_ready");
  fork_fixup (parent, write_ready, "write_ready");
  fork_fixup (parent, writer_opening, "writer_opening");
  /* Both readers and writers access the shared memory (writers need
     it for the _nwriters count), so remap it unconditionally.  */
  fork_fixup (parent, shmem_handle, "shmem_handle");
  if (reopen_shmem () < 0)
    api_fatal ("Can't reopen shared memory during fork, %E");
  if (reader)
    {
      /* Make sure the child starts unlocked. */
      fifo_client_unlock ();

      /* Remap the shared fc_handler list used by the owning reader.  */
      fork_fixup (parent, shared_fc_hdl, "shared_fc_hdl");
      if (reopen_shared_fc_handler () < 0)
	api_fatal ("Can't reopen shared fc_handler memory during fork, %E");
      /* Events used to negotiate FIFO ownership among readers.  */
      fork_fixup (parent, owner_needed_evt, "owner_needed_evt");
      fork_fixup (parent, owner_found_evt, "owner_found_evt");
      fork_fixup (parent, update_needed_evt, "update_needed_evt");
      if (close_on_exec ())
	/* Prevent a later attempt to close the non-inherited
	   pipe-instance handles copied from the parent. */
	nhandlers = 0;
      /* Fresh per-process events for controlling the reader thread;
	 these must exist before the thread is started below.  */
      if (!(cancel_evt = create_event ()))
	api_fatal ("Can't create reader thread cancel event during fork, %E");
      if (!(thr_sync_evt = create_event ()))
	api_fatal ("Can't create reader thread sync event during fork, %E");
      /* The child counts as an additional open reader.  */
      inc_nreaders ();
      me.winpid = GetCurrentProcessId ();
      new cygthread (fifo_reader_thread, this, "fifo_reader", thr_sync_evt);
    }
  if (writer)
    inc_nwriters ();
}
|
|
|
|
|
2019-04-18 23:39:52 +08:00
|
|
|
/* Called in the child process after exec.  Handles marked inheritable
   survive exec, but shared-memory views do not, so they must be
   remapped.  A reader cannot use the fc_handler list inherited from
   the parent (only the owner's list is meaningful), so the list is
   reset and a new fifo_reader_thread is started.  */
void
fhandler_fifo::fixup_after_exec ()
{
  fhandler_base::fixup_after_exec ();
  /* A close-on-exec descriptor is not carried across exec; nothing
     to fix up.  */
  if (close_on_exec ())
    return;
  if (reopen_shmem () < 0)
    api_fatal ("Can't reopen shared memory during exec, %E");
  if (reader)
    {
      /* Make sure the child starts unlocked. */
      fifo_client_unlock ();

      if (reopen_shared_fc_handler () < 0)
	api_fatal ("Can't reopen shared fc_handler memory during exec, %E");
      /* Discard the inherited fc_handler list; this process never
	 starts out as owner, so it can't use those handles.  */
      fc_handler = NULL;
      nhandlers = shandlers = 0;
      /* Fresh per-process events for controlling the reader thread;
	 these must exist before the thread is started below.  */
      if (!(cancel_evt = create_event ()))
	api_fatal ("Can't create reader thread cancel event during exec, %E");
      if (!(thr_sync_evt = create_event ()))
	api_fatal ("Can't create reader thread sync event during exec, %E");
      /* At this moment we're a new reader.  The count will be
	 decremented when the parent closes. */
      inc_nreaders ();
      me.winpid = GetCurrentProcessId ();
      new cygthread (fifo_reader_thread, this, "fifo_reader", thr_sync_evt);
    }
  if (writer)
    inc_nwriters ();
}
|
|
|
|
|
2009-07-25 04:54:33 +08:00
|
|
|
void
|
|
|
|
fhandler_fifo::set_close_on_exec (bool val)
|
|
|
|
{
|
|
|
|
fhandler_base::set_close_on_exec (val);
|
2011-10-30 12:50:36 +08:00
|
|
|
set_no_inheritance (read_ready, val);
|
|
|
|
set_no_inheritance (write_ready, val);
|
2020-04-26 21:38:46 +08:00
|
|
|
set_no_inheritance (writer_opening, val);
|
2020-04-24 09:29:32 +08:00
|
|
|
if (reader)
|
|
|
|
{
|
2020-04-24 21:05:12 +08:00
|
|
|
set_no_inheritance (owner_needed_evt, val);
|
|
|
|
set_no_inheritance (owner_found_evt, val);
|
2020-04-25 21:54:18 +08:00
|
|
|
set_no_inheritance (update_needed_evt, val);
|
2020-04-24 09:29:32 +08:00
|
|
|
fifo_client_lock ();
|
|
|
|
for (int i = 0; i < nhandlers; i++)
|
|
|
|
set_no_inheritance (fc_handler[i].h, val);
|
|
|
|
fifo_client_unlock ();
|
|
|
|
}
|
2009-07-25 04:54:33 +08:00
|
|
|
}
|