From 4583dc43b65f2680d77dfb24ca6395806c404daa Mon Sep 17 00:00:00 2001 From: Kirill Artemev Date: Sun, 10 Nov 2024 17:33:15 +0500 Subject: [PATCH] esync: rebase patches and fix fsync patches (#1314) * esync: rebase patches and fix fsync patches Signed-off-by: Kirill Artemev * fix patches Signed-off-by: Kirill Artemev --------- Signed-off-by: Kirill Artemev --- .../wine-tkg-patches/proton/esync/esync | 4 +- .../proton/esync/esync-unix-mainline.patch | 8 + .../legacy/esync-unix-mainline-962017be.patch | 4764 +++++++++++++++++ .../proton/fsync/fsync-unix-mainline.patch | 3 +- .../proton/fsync/fsync-unix-staging.patch | 3 +- 5 files changed, 4777 insertions(+), 5 deletions(-) create mode 100644 wine-tkg-git/wine-tkg-patches/proton/esync/legacy/esync-unix-mainline-962017be.patch diff --git a/wine-tkg-git/wine-tkg-patches/proton/esync/esync b/wine-tkg-git/wine-tkg-patches/proton/esync/esync index 640dccce2..212be39d2 100644 --- a/wine-tkg-git/wine-tkg-patches/proton/esync/esync +++ b/wine-tkg-git/wine-tkg-patches/proton/esync/esync @@ -2,8 +2,10 @@ # esync if [ "$_use_esync" = "true" ]; then - if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor e01b70851fae74c9a4067e00f4c48f17f319ed2d HEAD ); then + if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 962017beef4a0405e4fece0075e286e445da55da HEAD ); then _patchname='esync-unix-mainline.patch' && _patchmsg="Using Esync (unix, mainline) patchset" && nonuser_patcher + elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor e01b70851fae74c9a4067e00f4c48f17f319ed2d HEAD ); then + _patchname='esync-unix-mainline-962017be.patch' && _patchmsg="Using Esync (unix, mainline) patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor cebad42f35c3360ee73c0ff63baa546cd1bf35aa HEAD ); then _patchname='esync-unix-mainline-e01b708.patch' && _patchmsg="Using Esync (unix, mainline) patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 9305ac22067786c909071182a3ed65c2b898a102 HEAD ); then diff --git a/wine-tkg-git/wine-tkg-patches/proton/esync/esync-unix-mainline.patch b/wine-tkg-git/wine-tkg-patches/proton/esync/esync-unix-mainline.patch index 157b1d322..eb85d1e26 100644 --- a/wine-tkg-git/wine-tkg-patches/proton/esync/esync-unix-mainline.patch +++ b/wine-tkg-git/wine-tkg-patches/proton/esync/esync-unix-mainline.patch @@ -4019,6 +4019,14 @@ index c83e8c17027..e59a5b6c183 100644 no_satisfied, /* satisfied */ no_signal, /* signal */ named_pipe_device_file_get_fd, /* get_fd */ +@@ -344,6 +344,7 @@ + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + named_pipe_dir_get_fd, /* get_fd */ diff --git a/server/object.h b/server/object.h index 43f636b397b..5b6bb9cbfe1 100644 --- a/server/object.h diff --git a/wine-tkg-git/wine-tkg-patches/proton/esync/legacy/esync-unix-mainline-962017be.patch b/wine-tkg-git/wine-tkg-patches/proton/esync/legacy/esync-unix-mainline-962017be.patch new file mode 100644 index 000000000..157b1d322 --- /dev/null +++ b/wine-tkg-git/wine-tkg-patches/proton/esync/legacy/esync-unix-mainline-962017be.patch @@ -0,0 +1,4764 @@ +From c3e0c63e02d9a305d10b489b39b18adfe2e78393 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Fri, 16 Oct 2020 20:51:21 +0200 +Subject: Esync rebased 5.13+ mainline + + +diff --git a/README.esync b/README.esync +new file mode 100644 +index 
00000000000..11d86563a10 +--- /dev/null ++++ b/README.esync +@@ -0,0 +1,196 @@ ++This is eventfd-based synchronization, or 'esync' for short. Turn it on with ++WINEESYNC=1; debug it with +esync. ++ ++== BUGS AND LIMITATIONS == ++ ++Please let me know if you find any bugs. If you can, also attach a log with +++seh,+pid,+esync,+server,+timestamp. ++ ++If you get something like "eventfd: Too many open files" and then things start ++crashing, you've probably run out of file descriptors. esync creates one ++eventfd descriptor for each synchronization object, and some games may use a ++large number of these. Linux by default limits a process to 4096 file ++descriptors, which probably was reasonable back in the nineties but isn't ++really anymore. (Fortunately Debian and derivatives [Ubuntu, Mint] already ++have a reasonable limit.) To raise the limit you'll want to edit ++/etc/security/limits.conf and add a line like ++ ++* hard nofile 1048576 ++ ++then restart your session. ++ ++On distributions using systemd, the settings in `/etc/security/limits.conf` ++will be overridden by systemd's own settings. If you run `ulimit -Hn` and it ++returns a lower number than the one you've previously set, then you can set ++ ++DefaultLimitNOFILE=1048576 ++ ++in both `/etc/systemd/system.conf` and `/etc/systemd/user.conf`. You can then ++execute `sudo systemctl daemon-reexec` and restart your session. Check again ++with `ulimit -Hn` that the limit is correct. ++ ++Also note that if the wineserver has esync active, all clients also must, and ++vice versa. Otherwise things will probably crash quite badly. ++ ++== EXPLANATION == ++ ++The aim is to execute all synchronization operations in "user-space", that is, ++without going through wineserver. We do this using Linux's eventfd ++facility. The main impetus to using eventfd is so that we can poll multiple ++objects at once; in particular we can't do this with futexes, or pthread ++semaphores, or the like. The only way I know of to wait on any of multiple ++objects is to use select/poll/epoll to wait on multiple fds, and eventfd gives ++us those fds in a quite usable way. ++ ++Whenever a semaphore, event, or mutex is created, we have the server, instead ++of creating a traditional server-side event/semaphore/mutex, instead create an ++'esync' primitive. These live in esync.c and are very slim objects; in fact, ++they don't even know what type of primitive they are. The server is involved ++at all because we still need a way of creating named objects, passing handles ++to another process, etc. ++ ++The server creates an eventfd file descriptor with the requested parameters ++and passes it back to ntdll. ntdll creates an object of the appropriate type, ++then caches it in a table. This table is copied almost wholesale from the fd ++cache code in server.c. ++ ++Specific operations follow quite straightforwardly from eventfd: ++ ++* To release an object, or set an event, we simply write() to it. ++* An object is signalled if read() succeeds on it. Notably, we create all ++ eventfd descriptors with O_NONBLOCK, so that we can atomically check if an ++ object is signalled and grab it if it is. This also lets us reset events. ++* For objects whose state should not be reset upon waiting—e.g. manual-reset ++ events—we simply check for the POLLIN flag instead of reading. ++* Semaphores are handled by the EFD_SEMAPHORE flag. This matches up quite well ++ (although with some difficulties; see below). ++* Mutexes store their owner thread locally. 
This isn't reliable information if ++ a different process's thread owns the mutex, but this doesn't matter—a ++ thread should only care whether it owns the mutex, so it knows whether to ++ try waiting on it or simply to increase the recursion count. ++ ++The interesting part about esync is that (almost) all waits happen in ntdll, ++including those on server-bound objects. The idea here is that on the server ++side, for any waitable object, we create an eventfd file descriptor (not an ++esync primitive), and then pass it to ntdll if the program tries to wait on ++it. These are cached too, so only the first wait will require a round trip to ++the server. Then the server signals the file descriptor as appropriate, and ++thereby wakes up the client. So far this is implemented for processes, ++threads, message queues (difficult; see below), and device managers (necessary ++for drivers to work). All of these are necessarily server-bound, so we ++wouldn't really gain anything by signalling on the client side instead. Of ++course, except possibly for message queues, it's not likely that any program ++(cutting-edge D3D game or not) is going to be causing a great wineserver load ++by waiting on any of these objects; the motivation was rather to provide a way ++to wait on ntdll-bound and server-bound objects at the same time. ++ ++Some cases are still passed to the server, and there's probably no reason not ++to keep them that way. Those that I noticed while testing include: async ++objects, which are internal to the file APIs and never exposed to userspace, ++startup_info objects, which are internal to the loader and signalled when a ++process starts, and keyed events, which are exposed through an ntdll API ++(although not through kernel32) but can't be mixed with other objects (you ++have to use NtWaitForKeyedEvent()). Other cases include: named pipes, debug ++events, sockets, and timers. It's unlikely we'll want to optimize debug events ++or sockets (or any of the other, rather rare, objects), but it is possible ++we'll want to optimize named pipes or timers. ++ ++There were two sort of complications when working out the above. The first one ++was events. The trouble is that (1) the server actually creates some events by ++itself and (2) the server sometimes manipulates events passed by the ++client. Resolving the first case was easy enough, and merely entailed creating ++eventfd descriptors for the events the same way as for processes and threads ++(note that we don't really lose anything this way; the events include ++"LowMemoryCondition" and the event that signals system processes to shut ++down). For the second case I basically had to hook the server-side event ++functions to redirect to esync versions if the event was actually an esync ++primitive. ++ ++The second complication was message queues. The difficulty here is that X11 ++signals events by writing into a pipe (at least I think it's a pipe?), and so ++as a result wineserver has to poll on that descriptor. In theory we could just ++let wineserver do so and then signal us as appropriate, except that wineserver ++only polls on the pipe when the thread is waiting for events (otherwise we'd ++get e.g. keyboard input while the thread is doing something else, and spin ++forever trying to wake up a thread that doesn't care). 
The obvious solution is ++just to poll on that fd ourselves, and that's what I did—it's just that ++getting the fd from wineserver was kind of ugly, and the code for waiting was ++also kind of ugly basically because we have to wait on both X11's fd and the ++"normal" process/thread-style wineserver fd that we use to signal sent ++messages. The upshot about the whole thing was that races are basically ++impossible, since a thread can only wait on its own queue. ++ ++System APCs already work, since the server will forcibly suspend a thread if ++it's not already waiting, and so we just need to check for EINTR from ++poll(). User APCs and alertable waits are implemented in a similar style to ++message queues (well, sort of): whenever someone executes an alertable wait, ++we add an additional eventfd to the list, which the server signals when an APC ++arrives. If that eventfd gets signaled, we hand it off to the server to take ++care of, and return STATUS_USER_APC. ++ ++Originally I kept the volatile state of semaphores and mutexes inside a ++variable local to the handle, with the knowledge that this would break if ++someone tried to open the handle elsewhere or duplicate it. It did, and so now ++this state is stored inside shared memory. This is of the POSIX variety, is ++allocated by the server (but never mapped there) and lives under the path ++"/wine-esync". ++ ++There are a couple things that this infrastructure can't handle, although ++surprisingly there aren't that many. In particular: ++* Implementing wait-all, i.e. WaitForMultipleObjects(..., TRUE, ...), is not ++ exactly possible the way we'd like it to be possible. In theory that ++ function should wait until it knows all objects are available, then grab ++ them all at once atomically. The server (like the kernel) can do this ++ because the server is single-threaded and can't race with itself. We can't ++ do this in ntdll, though. The approach I've taken I've laid out in great ++ detail in the relevant patch, but for a quick summary we poll on each object ++ until it's signaled (but don't grab it), check them all again, and if ++ they're all signaled we try to grab them all at once in a tight loop, and if ++ we fail on any of them we reset the count on whatever we shouldn't have ++ consumed. Such a blip would necessarily be very quick. ++* The whole patchset only works on Linux, where eventfd is available. However, ++ it should be possible to make it work on a Mac, since eventfd is just a ++ quicker, easier way to use pipes (i.e. instead of writing 1 to the fd you'd ++ write 1 byte; instead of reading a 64-bit value from the fd you'd read as ++ many bytes as you can carry, which is admittedly less than 2**64 but ++ can probably be something reasonable.) It's also possible, although I ++ haven't yet looked, to use some different kind of synchronization ++ primitives, but pipes would be easiest to tack onto this framework. ++* PulseEvent() can't work the way it's supposed to work. Fortunately it's rare ++ and deprecated. It's also explicitly mentioned on MSDN that a thread can ++ miss the notification for a kernel APC, so in a sense we're not necessarily ++ doing anything wrong. ++ ++There are some things that are perfectly implementable but that I just haven't ++done yet: ++* Other synchronizable server primitives. It's unlikely we'll need any of ++ these, except perhaps named pipes (which would honestly be rather difficult) ++ and (maybe) timers. ++* Access masks. 
We'd need to store these inside ntdll, and validate them when ++ someone tries to execute esync operations. ++ ++This patchset was inspired by Daniel Santos' "hybrid synchronization" ++patchset. My idea was to create a framework whereby even contended waits could ++be executed in userspace, eliminating a lot of the complexity that his ++synchronization primitives used. I do however owe some significant gratitude ++toward him for setting me on the right path. ++ ++I've tried to maximize code separation, both to make any potential rebases ++easier and to ensure that esync is only active when configured. All code in ++existing source files is guarded with "if (do_esync())", and generally that ++condition is followed by "return esync_version_of_this_method(...);", where ++the latter lives in esync.c and is declared in esync.h. I've also tried to ++make the patchset very clear and readable—to write it as if I were going to ++submit it upstream. (Some intermediate patches do break things, which Wine is ++generally against, but I think it's for the better in this case.) I have cut ++some corners, though; there is some error checking missing, or implicit ++assumptions that the program is behaving correctly. ++ ++I've tried to be careful about races. There are a lot of comments whose ++purpose are basically to assure me that races are impossible. In most cases we ++don't have to worry about races since all of the low-level synchronization is ++done by the kernel. ++ ++Anyway, yeah, this is esync. Use it if you like. ++ ++--Zebediah Figura + +diff --git a/configure.ac b/configure.ac +index dd925caf312..f1084515b7a 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -507,6 +507,7 @@ AC_CHECK_HEADERS(\ + sys/cdio.h \ + sys/epoll.h \ + sys/event.h \ ++ sys/eventfd.h \ + sys/extattr.h \ + sys/filio.h \ + sys/ipc.h \ +@@ -2260,6 +2261,7 @@ AC_CHECK_FUNCS(\ + port_create \ + posix_fadvise \ + posix_fallocate \ ++ ppoll \ + prctl \ + pread \ + proc_pidinfo \ +@@ -2324,6 +2326,12 @@ case $host_os in + ;; + esac + ++ac_save_LIBS=$LIBS ++AC_SEARCH_LIBS(shm_open, rt, ++ [AC_DEFINE(HAVE_SHM_OPEN, 1, [Define to 1 if you have the `shm_open' function.]) ++ test "$ac_res" = "none required" || AC_SUBST(RT_LIBS,"$ac_res")]) ++LIBS=$ac_save_LIBS ++ + AC_CACHE_CHECK([for sched_setaffinity],wine_cv_have_sched_setaffinity, + AC_LINK_IFELSE([AC_LANG_PROGRAM( + [[#include ]], [[sched_setaffinity(0, 0, 0);]])],[wine_cv_have_sched_setaffinity=yes],[wine_cv_have_sched_setaffinity=no])) +diff --git a/dlls/kernel32/tests/sync.c b/dlls/kernel32/tests/sync.c +index 2a47abf342c..0f072a5846e 100644 +--- a/dlls/kernel32/tests/sync.c ++++ b/dlls/kernel32/tests/sync.c +@@ -54,6 +54,7 @@ static BOOLEAN (WINAPI *pTryAcquireSRWLockShared)(PSRWLOCK); + + static NTSTATUS (WINAPI *pNtAllocateVirtualMemory)(HANDLE, PVOID *, ULONG_PTR, SIZE_T *, ULONG, ULONG); + static NTSTATUS (WINAPI *pNtFreeVirtualMemory)(HANDLE, PVOID *, SIZE_T *, ULONG); ++static NTSTATUS (WINAPI *pNtQuerySystemTime)(LARGE_INTEGER *); + static NTSTATUS (WINAPI *pNtWaitForSingleObject)(HANDLE, BOOLEAN, const LARGE_INTEGER *); + static NTSTATUS (WINAPI *pNtWaitForMultipleObjects)(ULONG,const HANDLE*,BOOLEAN,BOOLEAN,const LARGE_INTEGER*); + static PSLIST_ENTRY (__fastcall *pRtlInterlockedPushListSList)(PSLIST_HEADER list, PSLIST_ENTRY first, +@@ -177,8 +178,23 @@ static void test_signalandwait(void) + CloseHandle(file); + } + ++static HANDLE mutex, mutex2, mutices[2]; ++ ++static DWORD WINAPI mutex_thread( void *param ) ++{ ++ DWORD expect = (DWORD)(DWORD_PTR)param; ++ DWORD 
ret; ++ ++ ret = WaitForSingleObject( mutex, 0 ); ++ ok(ret == expect, "expected %lu, got %lu\n", expect, ret); ++ ++ if (!ret) ReleaseMutex( mutex ); ++ return 0; ++} ++ + static void test_mutex(void) + { ++ HANDLE thread; + DWORD wait_ret; + BOOL ret; + HANDLE hCreated; +@@ -218,7 +234,8 @@ todo_wine + SetLastError(0xdeadbeef); + hOpened = OpenMutexA(GENERIC_READ | GENERIC_WRITE, FALSE, "WineTestMutex"); + ok(hOpened != NULL, "OpenMutex failed with error %ld\n", GetLastError()); +- wait_ret = WaitForSingleObject(hOpened, INFINITE); ++ wait_ret = WaitForSingleObject(hOpened, 0); ++todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */ + ok(wait_ret == WAIT_FAILED, "WaitForSingleObject succeeded\n"); + CloseHandle(hOpened); + +@@ -249,6 +266,7 @@ todo_wine + + SetLastError(0xdeadbeef); + ret = ReleaseMutex(hCreated); ++todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */ + ok(!ret && (GetLastError() == ERROR_NOT_OWNER), + "ReleaseMutex should have failed with ERROR_NOT_OWNER instead of %ld\n", GetLastError()); + +@@ -287,6 +305,85 @@ todo_wine + CloseHandle(hOpened); + + CloseHandle(hCreated); ++ ++ mutex = CreateMutexA( NULL, FALSE, NULL ); ++ ok(!!mutex, "got error %lu\n", GetLastError()); ++ ++ ret = ReleaseMutex( mutex ); ++ ok(!ret, "got %ld\n", ret); ++ ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); ++ ++ for (i = 0; i < 100; i++) ++ { ++ ret = WaitForSingleObject( mutex, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ } ++ ++ for (i = 0; i < 100; i++) ++ { ++ ret = ReleaseMutex( mutex ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ } ++ ++ ret = ReleaseMutex( mutex ); ++ ok(!ret, "got %ld\n", ret); ++ ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); ++ ++ thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); ++ ret = WaitForSingleObject( thread, 2000 ); ++ ok(ret == 0, "wait failed: %lu\n", ret); ++ ++ WaitForSingleObject( mutex, 0 ); ++ ++ thread = CreateThread( NULL, 0, mutex_thread, (void *)WAIT_TIMEOUT, 0, NULL ); ++ ret = WaitForSingleObject( thread, 2000 ); ++ ok(ret == 0, "wait failed: %lu\n", ret); ++ ++ ret = ReleaseMutex( mutex ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); ++ ret = WaitForSingleObject( thread, 2000 ); ++ ok(ret == 0, "wait failed: %lu\n", ret); ++ ++ mutex2 = CreateMutexA( NULL, TRUE, NULL ); ++ ok(!!mutex2, "got error %lu\n", GetLastError()); ++ ++ ret = ReleaseMutex( mutex2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ReleaseMutex( mutex2 ); ++ ok(!ret, "got %ld\n", ret); ++ ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); ++ ++ mutices[0] = mutex; ++ mutices[1] = mutex2; ++ ++ ret = WaitForMultipleObjects( 2, mutices, FALSE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = ReleaseMutex( mutex ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ReleaseMutex( mutex2 ); ++ ok(!ret, "got %ld\n", ret); ++ ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); ++ ++ ret = WaitForMultipleObjects( 2, mutices, TRUE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = ReleaseMutex( mutex ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ReleaseMutex( mutex2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = CloseHandle( mutex ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = CloseHandle( mutex2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ + } + + static void test_slist(void) 
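For readers who have not used eventfd: the mechanism README.esync above relies on, write() to release or signal, a nonblocking read() to atomically check-and-grab, POLLIN to test state without consuming, and EFD_SEMAPHORE for counted objects, boils down to the minimal standalone sketch below. This is an illustration for the reader, not part of the patch; error handling is omitted.

/* Minimal demo of the eventfd semantics esync builds on (Linux only). */
#include <sys/eventfd.h>
#include <poll.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* EFD_SEMAPHORE: each read() returns 1 and decrements the count by 1.
     * EFD_NONBLOCK: read() fails with EAGAIN instead of blocking, which is
     * what makes "check if signaled and grab it" a single atomic step. */
    int fd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK );
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    uint64_t value = 2;

    write( fd, &value, sizeof(value) );               /* release twice */

    /* poll() reports POLLIN while the count is nonzero; this is how a
     * manual-reset-style object is tested without consuming it. */
    printf( "signaled: %d\n", poll( &pfd, 1, 0 ) );   /* prints 1 */

    while (read( fd, &value, sizeof(value) ) == sizeof(value))
        printf( "grabbed one\n" );    /* succeeds twice, then fails with EAGAIN */

    close( fd );
    return 0;
}
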
+@@ -462,12 +559,13 @@ static void test_slist(void) + + static void test_event(void) + { +- HANDLE handle, handle2; ++ HANDLE handle, handle2, handles[2]; + SECURITY_ATTRIBUTES sa; + SECURITY_DESCRIPTOR sd; + ACL acl; + DWORD ret; + BOOL val; ++ int i; + + /* no sd */ + handle = CreateEventA(NULL, FALSE, FALSE, __FILE__ ": Test Event"); +@@ -571,11 +669,130 @@ static void test_event(void) + ok( ret, "QueryMemoryResourceNotification failed err %lu\n", GetLastError() ); + ok( val == FALSE || val == TRUE, "wrong value %lu\n", val ); + CloseHandle( handle ); ++ ++ handle = CreateEventA( NULL, TRUE, FALSE, NULL ); ++ ok(!!handle, "got error %lu\n", GetLastError()); ++ ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ret = SetEvent( handle ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = SetEvent( handle ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ for (i = 0; i < 100; i++) ++ { ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ } ++ ++ ret = ResetEvent( handle ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ResetEvent( handle ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ handle2 = CreateEventA( NULL, FALSE, TRUE, NULL ); ++ ok(!!handle2, "got error %lu\n", GetLastError()); ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ret = SetEvent( handle2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = SetEvent( handle2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ResetEvent( handle2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = ResetEvent( handle2 ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ handles[0] = handle; ++ handles[1] = handle2; ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ SetEvent( handle ); ++ SetEvent( handle2 ); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ResetEvent( handle ); ++ SetEvent( handle2 ); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 1, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ SetEvent( handle ); ++ SetEvent( handle2 ); ++ ++ ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ SetEvent( handle2 ); ++ ResetEvent( handle ); ++ ++ ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ handles[0] = handle2; ++ handles[1] = handle; ++ SetEvent( handle ); ++ SetEvent( handle2 ); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 1, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, 
FALSE, 0 ); ++ ok(ret == 1, "got %lu\n", ret); ++ ++ ret = CloseHandle( handle ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ++ ret = CloseHandle( handle2 ); ++ ok(ret, "got error %lu\n", GetLastError()); + } + + static void test_semaphore(void) + { +- HANDLE handle, handle2; ++ HANDLE handle, handle2, handles[2]; ++ DWORD ret; ++ LONG prev; ++ int i; + + /* test case sensitivity */ + +@@ -617,6 +834,99 @@ static void test_semaphore(void) + ok( GetLastError() == ERROR_INVALID_PARAMETER, "wrong error %lu\n", GetLastError()); + + CloseHandle( handle ); ++ ++ handle = CreateSemaphoreA( NULL, 0, 5, NULL ); ++ ok(!!handle, "CreateSemaphore failed: %lu\n", GetLastError()); ++ ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ret = ReleaseSemaphore( handle, 1, &prev ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ok(prev == 0, "got prev %ld\n", prev); ++ ++ ret = ReleaseSemaphore( handle, 1, &prev ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ok(prev == 1, "got prev %ld\n", prev); ++ ++ ret = ReleaseSemaphore( handle, 5, &prev ); ++ ok(!ret, "got %ld\n", ret); ++ ok(GetLastError() == ERROR_TOO_MANY_POSTS, "got error %lu\n", GetLastError()); ++ ok(prev == 1, "got prev %ld\n", prev); ++ ++ ret = ReleaseSemaphore( handle, 2, &prev ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ok(prev == 2, "got prev %ld\n", prev); ++ ++ ret = ReleaseSemaphore( handle, 1, &prev ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ok(prev == 4, "got prev %ld\n", prev); ++ ++ for (i = 0; i < 5; i++) ++ { ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ } ++ ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ handle2 = CreateSemaphoreA( NULL, 3, 5, NULL ); ++ ok(!!handle2, "CreateSemaphore failed: %lu\n", GetLastError()); ++ ++ ret = ReleaseSemaphore( handle2, 1, &prev ); ++ ok(ret, "got error %lu\n", GetLastError()); ++ ok(prev == 3, "got prev %ld\n", prev); ++ ++ for (i = 0; i < 4; i++) ++ { ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ } ++ ++ ret = WaitForSingleObject( handle2, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ handles[0] = handle; ++ handles[1] = handle2; ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ReleaseSemaphore( handle, 1, NULL ); ++ ReleaseSemaphore( handle2, 1, NULL ); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == 1, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ReleaseSemaphore( handle, 1, NULL ); ++ ReleaseSemaphore( handle2, 1, NULL ); ++ ++ ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ReleaseSemaphore( handle, 1, NULL ); ++ ++ ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ ret = WaitForSingleObject( handle, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = CloseHandle( handle ); ++ ok(ret, "got error %lu\n", ret); ++ ++ ret = CloseHandle( handle2 ); ++ ok(ret, "got error %lu\n", ret); + } + + static void test_waitable_timer(void) +@@ -1171,11 +1481,15 @@ static HANDLE modify_handle(HANDLE handle, DWORD modify) + return 
ULongToHandle(tmp); + } + ++#define TIMEOUT_INFINITE (((LONGLONG)0x7fffffff) << 32 | 0xffffffff) ++ + static void test_WaitForSingleObject(void) + { + HANDLE signaled, nonsignaled, invalid; ++ LARGE_INTEGER ntnow, ntthen; + LARGE_INTEGER timeout; + NTSTATUS status; ++ DWORD now, then; + DWORD ret; + + signaled = CreateEventW(NULL, TRUE, TRUE, NULL); +@@ -1260,6 +1574,68 @@ static void test_WaitForSingleObject(void) + status = pNtWaitForSingleObject(GetCurrentThread(), FALSE, &timeout); + ok(status == STATUS_TIMEOUT, "expected STATUS_TIMEOUT, got %08lx\n", status); + ++ ret = WaitForSingleObject( signaled, 0 ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ ret = WaitForSingleObject( nonsignaled, 0 ); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ++ /* test that a timed wait actually does wait */ ++ now = GetTickCount(); ++ ret = WaitForSingleObject( nonsignaled, 100 ); ++ then = GetTickCount(); ++ ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); ++ ok(abs((then - now) - 100) < 5, "got %lu ms\n", then - now); ++ ++ now = GetTickCount(); ++ ret = WaitForSingleObject( signaled, 100 ); ++ then = GetTickCount(); ++ ok(ret == 0, "got %lu\n", ret); ++ ok(abs(then - now) < 5, "got %lu ms\n", then - now); ++ ++ ret = WaitForSingleObject( signaled, INFINITE ); ++ ok(ret == 0, "got %lu\n", ret); ++ ++ /* test NT timeouts */ ++ pNtQuerySystemTime( &ntnow ); ++ timeout.QuadPart = ntnow.QuadPart + 100 * 10000; ++ status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); ++ pNtQuerySystemTime( &ntthen ); ++ ok(status == STATUS_TIMEOUT, "got %#lx\n", status); ++ ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", ++ wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); ++ ++ pNtQuerySystemTime( &ntnow ); ++ timeout.QuadPart = -100 * 10000; ++ status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); ++ pNtQuerySystemTime( &ntthen ); ++ ok(status == STATUS_TIMEOUT, "got %#lx\n", status); ++ ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", ++ wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); ++ ++ status = pNtWaitForSingleObject( signaled, FALSE, NULL ); ++ ok(status == 0, "got %#lx\n", status); ++ ++ timeout.QuadPart = TIMEOUT_INFINITE; ++ status = pNtWaitForSingleObject( signaled, FALSE, &timeout ); ++ ok(status == 0, "got %#lx\n", status); ++ ++ pNtQuerySystemTime( &ntnow ); ++ timeout.QuadPart = ntnow.QuadPart; ++ status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); ++ pNtQuerySystemTime( &ntthen ); ++ ok(status == STATUS_TIMEOUT, "got %#lx\n", status); ++ ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", ++ wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); ++ ++ pNtQuerySystemTime( &ntnow ); ++ timeout.QuadPart = ntnow.QuadPart - 100 * 10000; ++ status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); ++ pNtQuerySystemTime( &ntthen ); ++ ok(status == STATUS_TIMEOUT, "got %#lx\n", status); ++ ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", ++ wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); ++ + CloseHandle(signaled); + CloseHandle(nonsignaled); + } +@@ -2702,6 +3078,84 @@ static void test_QueueUserAPC(void) + CloseHandle(thread); + } + ++static int zigzag_state, zigzag_count[2], zigzag_stop; ++ ++static DWORD CALLBACK zigzag_event0(void *arg) ++{ ++ HANDLE *events = arg; ++ ++ while (!zigzag_stop) ++ { ++ WaitForSingleObject(events[0], INFINITE); ++ ResetEvent(events[0]); ++ ok(zigzag_state == 0, "got wrong state %ld\n", 
zigzag_state); ++ zigzag_state++; ++ SetEvent(events[1]); ++ zigzag_count[0]++; ++ } ++ trace("thread 0 got done\n"); ++ return 0; ++} ++ ++static DWORD CALLBACK zigzag_event1(void *arg) ++{ ++ HANDLE *events = arg; ++ ++ while (!zigzag_stop) ++ { ++ WaitForSingleObject(events[1], INFINITE); ++ ResetEvent(events[1]); ++ ok(zigzag_state == 1, "got wrong state %ld\n", zigzag_state); ++ zigzag_state--; ++ SetEvent(events[0]); ++ zigzag_count[1]++; ++ } ++ trace("thread 1 got done\n"); ++ return 0; ++} ++ ++static void test_zigzag_event(void) ++{ ++ /* The basic idea is to test SetEvent/Wait back and forth between two ++ * threads. Each thread clears their own event, sets some common data, ++ * signals the other's, then waits on their own. We make sure the common ++ * data is always in the right state. We also print performance data. */ ++ ++ HANDLE threads[2], events[2]; ++ BOOL ret; ++ ++ events[0] = CreateEventA(NULL, FALSE, FALSE, NULL); ++ events[1] = CreateEventA(NULL, FALSE, FALSE, NULL); ++ ++ threads[0] = CreateThread(NULL, 0, zigzag_event0, events, 0, NULL); ++ threads[1] = CreateThread(NULL, 0, zigzag_event1, events, 0, NULL); ++ ++ zigzag_state = 0; ++ zigzag_count[0] = zigzag_count[1] = 0; ++ zigzag_stop = 0; ++ ++ trace("starting zigzag test (events)\n"); ++ SetEvent(events[0]); ++ Sleep(2000); ++ zigzag_stop = 1; ++ ret = WaitForMultipleObjects(2, threads, FALSE, INFINITE); ++ trace("%ld\n", ret); ++ ok(ret == 0 || ret == 1, "wait failed: %lu\n", ret); ++ ++ ok(zigzag_count[0] == zigzag_count[1] || zigzag_count[0] == zigzag_count[1] + 1, ++ "count did not match: %d != %ld\n", zigzag_count[0], zigzag_count[1]); ++ ++ /* signal the other thread to finish, if it didn't already ++ * (in theory they both would at the same time, but there's a slight race on teardown if we get ++ * thread 1 SetEvent -> thread 0 ResetEvent -> thread 0 Wait -> thread 1 exits */ ++ zigzag_state = 1-ret; ++ SetEvent(events[1-ret]); ++ ret = WaitForSingleObject(threads[1-ret], 1000); ++ ok(!ret, "wait failed: %lu\n", ret); ++ ++ trace("count: %ld\n", zigzag_count[0]); ++} ++ + START_TEST(sync) + { + char **argv; +@@ -2728,6 +3182,7 @@ START_TEST(sync) + pTryAcquireSRWLockShared = (void *)GetProcAddress(hdll, "TryAcquireSRWLockShared"); + pNtAllocateVirtualMemory = (void *)GetProcAddress(hntdll, "NtAllocateVirtualMemory"); + pNtFreeVirtualMemory = (void *)GetProcAddress(hntdll, "NtFreeVirtualMemory"); ++ pNtQuerySystemTime = (void *)GetProcAddress(hntdll, "NtQuerySystemTime"); + pNtWaitForSingleObject = (void *)GetProcAddress(hntdll, "NtWaitForSingleObject"); + pNtWaitForMultipleObjects = (void *)GetProcAddress(hntdll, "NtWaitForMultipleObjects"); + pRtlInterlockedPushListSList = (void *)GetProcAddress(hntdll, "RtlInterlockedPushListSList"); +@@ -2763,5 +3218,6 @@ START_TEST(sync) + test_srwlock_example(); + test_alertable_wait(); + test_apc_deadlock(); ++ test_zigzag_event(); + test_crit_section(); + } +diff --git a/dlls/ntdll/Makefile.in b/dlls/ntdll/Makefile.in +index 67847bb9392..c96a62ae006 100644 +--- a/dlls/ntdll/Makefile.in ++++ b/dlls/ntdll/Makefile.in +@@ -46,6 +46,7 @@ C_SRCS = \ + unix/cdrom.c \ + unix/debug.c \ + unix/env.c \ ++ unix/esync.c \ + unix/file.c \ + unix/loader.c \ + unix/process.c \ +diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c +new file mode 100644 +index 00000000000..ed801c71991 +--- /dev/null ++++ b/dlls/ntdll/unix/esync.c +@@ -0,0 +1,1326 @@ ++/* ++ * eventfd-based synchronization objects ++ * ++ * Copyright (C) 2018 Zebediah Figura ++ * ++ * This library is 
free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA ++ */ ++ ++#if 0 ++#pragma makedep unix ++#endif ++ ++#define _GNU_SOURCE ++ ++#include "config.h" ++ ++#include ++#include ++#include ++#ifdef HAVE_POLL_H ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#ifdef HAVE_SYS_STAT_H ++# include ++#endif ++#include ++#include ++ ++#include "ntstatus.h" ++#define WIN32_NO_STATUS ++#include "windef.h" ++#include "winternl.h" ++#include "wine/server.h" ++#include "wine/debug.h" ++ ++#include "unix_private.h" ++#include "esync.h" ++ ++WINE_DEFAULT_DEBUG_CHANNEL(esync); ++ ++int do_esync(void) ++{ ++#ifdef HAVE_SYS_EVENTFD_H ++ static int do_esync_cached = -1; ++ ++ if (do_esync_cached == -1) ++ do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")); ++ ++ return do_esync_cached; ++#else ++ static int once; ++ if (!once++) ++ FIXME("eventfd not supported on this platform.\n"); ++ return 0; ++#endif ++} ++ ++struct esync ++{ ++ enum esync_type type; ++ int fd; ++ void *shm; ++}; ++ ++struct semaphore ++{ ++ int max; ++ int count; ++}; ++C_ASSERT(sizeof(struct semaphore) == 8); ++ ++struct mutex ++{ ++ DWORD tid; ++ int count; /* recursion count */ ++}; ++C_ASSERT(sizeof(struct mutex) == 8); ++ ++struct event ++{ ++ int signaled; ++ int locked; ++}; ++C_ASSERT(sizeof(struct event) == 8); ++ ++static char shm_name[29]; ++static int shm_fd; ++static void **shm_addrs; ++static int shm_addrs_size; /* length of the allocated shm_addrs array */ ++static long pagesize; ++ ++static pthread_mutex_t shm_addrs_mutex = PTHREAD_MUTEX_INITIALIZER; ++ ++static void *get_shm( unsigned int idx ) ++{ ++ int entry = (idx * 8) / pagesize; ++ int offset = (idx * 8) % pagesize; ++ void *ret; ++ ++ pthread_mutex_lock( &shm_addrs_mutex ); ++ ++ if (entry >= shm_addrs_size) ++ { ++ int new_size = max(shm_addrs_size * 2, entry + 1); ++ ++ if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) ++ ERR("Failed to grow shm_addrs array to size %d.\n", shm_addrs_size); ++ memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); ++ shm_addrs_size = new_size; ++ } ++ ++ if (!shm_addrs[entry]) ++ { ++ void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); ++ if (addr == (void *)-1) ++ ERR("Failed to map page %d (offset %#lx).\n", entry, entry * pagesize); ++ ++ TRACE("Mapping page %d at %p.\n", entry, addr); ++ ++ if (InterlockedCompareExchangePointer( &shm_addrs[entry], addr, 0 )) ++ munmap( addr, pagesize ); /* someone beat us to it */ ++ } ++ ++ ret = (void *)((unsigned long)shm_addrs[entry] + offset); ++ ++ pthread_mutex_unlock( &shm_addrs_mutex ); ++ ++ return ret; ++} ++ ++/* We'd like lookup to be fast. To that end, we use a static list indexed by handle. ++ * This is copied and adapted from the fd cache code. 
*/ ++ ++#define ESYNC_LIST_BLOCK_SIZE (65536 / sizeof(struct esync)) ++#define ESYNC_LIST_ENTRIES 256 ++ ++static struct esync *esync_list[ESYNC_LIST_ENTRIES]; ++static struct esync esync_list_initial_block[ESYNC_LIST_BLOCK_SIZE]; ++ ++static inline UINT_PTR handle_to_index( HANDLE handle, UINT_PTR *entry ) ++{ ++ UINT_PTR idx = (((UINT_PTR)handle) >> 2) - 1; ++ *entry = idx / ESYNC_LIST_BLOCK_SIZE; ++ return idx % ESYNC_LIST_BLOCK_SIZE; ++} ++ ++static struct esync *add_to_list( HANDLE handle, enum esync_type type, int fd, void *shm ) ++{ ++ UINT_PTR entry, idx = handle_to_index( handle, &entry ); ++ ++ if (entry >= ESYNC_LIST_ENTRIES) ++ { ++ FIXME( "too many allocated handles, not caching %p\n", handle ); ++ return FALSE; ++ } ++ ++ if (!esync_list[entry]) /* do we need to allocate a new block of entries? */ ++ { ++ if (!entry) esync_list[0] = esync_list_initial_block; ++ else ++ { ++ void *ptr = anon_mmap_alloc( ESYNC_LIST_BLOCK_SIZE * sizeof(struct esync), ++ PROT_READ | PROT_WRITE ); ++ if (ptr == MAP_FAILED) return FALSE; ++ esync_list[entry] = ptr; ++ } ++ } ++ ++ if (!InterlockedCompareExchange( (int *)&esync_list[entry][idx].type, type, 0 )) ++ { ++ esync_list[entry][idx].fd = fd; ++ esync_list[entry][idx].shm = shm; ++ } ++ return &esync_list[entry][idx]; ++} ++ ++static struct esync *get_cached_object( HANDLE handle ) ++{ ++ UINT_PTR entry, idx = handle_to_index( handle, &entry ); ++ ++ if (entry >= ESYNC_LIST_ENTRIES || !esync_list[entry]) return NULL; ++ if (!esync_list[entry][idx].type) return NULL; ++ ++ return &esync_list[entry][idx]; ++} ++ ++/* Gets an object. This is either a proper esync object (i.e. an event, ++ * semaphore, etc. created using create_esync) or a generic synchronizable ++ * server-side object which the server will signal (e.g. a process, thread, ++ * message queue, etc.) */ ++static NTSTATUS get_object( HANDLE handle, struct esync **obj ) ++{ ++ NTSTATUS ret = STATUS_SUCCESS; ++ enum esync_type type = 0; ++ unsigned int shm_idx = 0; ++ obj_handle_t fd_handle; ++ sigset_t sigset; ++ int fd = -1; ++ ++ if ((*obj = get_cached_object( handle ))) return STATUS_SUCCESS; ++ ++ if ((INT_PTR)handle < 0) ++ { ++ /* We can deal with pseudo-handles, but it's just easier this way */ ++ return STATUS_NOT_IMPLEMENTED; ++ } ++ ++ if (!handle) ++ { ++ /* Shadow of the Tomb Raider really likes passing in NULL handles to ++ * various functions. Concerning, but let's avoid a server call. */ ++ return STATUS_INVALID_HANDLE; ++ } ++ ++ /* We need to try grabbing it from the server. */ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ if (!(*obj = get_cached_object( handle ))) ++ { ++ SERVER_START_REQ( get_esync_fd ) ++ { ++ req->handle = wine_server_obj_handle( handle ); ++ if (!(ret = wine_server_call( req ))) ++ { ++ type = reply->type; ++ shm_idx = reply->shm_idx; ++ fd = receive_fd( &fd_handle ); ++ assert( wine_server_ptr_handle(fd_handle) == handle ); ++ } ++ } ++ SERVER_END_REQ; ++ } ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ if (*obj) ++ { ++ /* We managed to grab it while in the CS; return it. */ ++ return STATUS_SUCCESS; ++ } ++ ++ if (ret) ++ { ++ WARN("Failed to retrieve fd for handle %p, status %#x.\n", handle, ret); ++ *obj = NULL; ++ return ret; ++ } ++ ++ TRACE("Got fd %d for handle %p.\n", fd, handle); ++ ++ *obj = add_to_list( handle, type, fd, shm_idx ? 
get_shm( shm_idx ) : 0 ); ++ return ret; ++} ++ ++NTSTATUS esync_close( HANDLE handle ) ++{ ++ UINT_PTR entry, idx = handle_to_index( handle, &entry ); ++ ++ TRACE("%p.\n", handle); ++ ++ if (entry < ESYNC_LIST_ENTRIES && esync_list[entry]) ++ { ++ if (InterlockedExchange((int *)&esync_list[entry][idx].type, 0)) ++ { ++ close( esync_list[entry][idx].fd ); ++ return STATUS_SUCCESS; ++ } ++ } ++ ++ return STATUS_INVALID_HANDLE; ++} ++ ++static NTSTATUS create_esync( enum esync_type type, HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, int initval, int max ) ++{ ++ NTSTATUS ret; ++ data_size_t len; ++ struct object_attributes *objattr; ++ obj_handle_t fd_handle; ++ unsigned int shm_idx; ++ sigset_t sigset; ++ int fd; ++ ++ if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; ++ ++ /* We have to synchronize on the fd cache CS so that our calls to ++ * receive_fd don't race with theirs. */ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ SERVER_START_REQ( create_esync ) ++ { ++ req->access = access; ++ req->initval = initval; ++ req->type = type; ++ req->max = max; ++ wine_server_add_data( req, objattr, len ); ++ ret = wine_server_call( req ); ++ if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) ++ { ++ *handle = wine_server_ptr_handle( reply->handle ); ++ type = reply->type; ++ shm_idx = reply->shm_idx; ++ fd = receive_fd( &fd_handle ); ++ assert( wine_server_ptr_handle(fd_handle) == *handle ); ++ } ++ } ++ SERVER_END_REQ; ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) ++ { ++ add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); ++ TRACE("-> handle %p, fd %d.\n", *handle, fd); ++ } ++ ++ free( objattr ); ++ return ret; ++} ++ ++static NTSTATUS open_esync( enum esync_type type, HANDLE *handle, ++ ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr ) ++{ ++ NTSTATUS ret; ++ obj_handle_t fd_handle; ++ unsigned int shm_idx; ++ sigset_t sigset; ++ int fd; ++ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ SERVER_START_REQ( open_esync ) ++ { ++ req->access = access; ++ req->attributes = attr->Attributes; ++ req->rootdir = wine_server_obj_handle( attr->RootDirectory ); ++ req->type = type; ++ if (attr->ObjectName) ++ wine_server_add_data( req, attr->ObjectName->Buffer, attr->ObjectName->Length ); ++ if (!(ret = wine_server_call( req ))) ++ { ++ *handle = wine_server_ptr_handle( reply->handle ); ++ type = reply->type; ++ shm_idx = reply->shm_idx; ++ fd = receive_fd( &fd_handle ); ++ assert( wine_server_ptr_handle(fd_handle) == *handle ); ++ } ++ } ++ SERVER_END_REQ; ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ if (!ret) ++ { ++ add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); ++ ++ TRACE("-> handle %p, fd %d.\n", *handle, fd); ++ } ++ return ret; ++} ++ ++extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max) ++{ ++ TRACE("name %s, initial %d, max %d.\n", ++ attr ? 
debugstr_us(attr->ObjectName) : "", initial, max); ++ ++ return create_esync( ESYNC_SEMAPHORE, handle, access, attr, initial, max ); ++} ++ ++NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ) ++{ ++ TRACE("name %s.\n", debugstr_us(attr->ObjectName)); ++ ++ return open_esync( ESYNC_SEMAPHORE, handle, access, attr ); ++} ++ ++NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ) ++{ ++ struct esync *obj; ++ struct semaphore *semaphore; ++ uint64_t count64 = count; ++ ULONG current; ++ NTSTATUS ret; ++ ++ TRACE("%p, %d, %p.\n", handle, count, prev); ++ ++ if ((ret = get_object( handle, &obj))) return ret; ++ semaphore = obj->shm; ++ ++ do ++ { ++ current = semaphore->count; ++ ++ if (count + current > semaphore->max) ++ return STATUS_SEMAPHORE_LIMIT_EXCEEDED; ++ } while (InterlockedCompareExchange( &semaphore->count, count + current, current ) != current); ++ ++ if (prev) *prev = current; ++ ++ /* We don't have to worry about a race between increasing the count and ++ * write(). The fact that we were able to increase the count means that we ++ * have permission to actually write that many releases to the semaphore. */ ++ ++ if (write( obj->fd, &count64, sizeof(count64) ) == -1) ++ return errno_to_status( errno ); ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ) ++{ ++ struct esync *obj; ++ struct semaphore *semaphore; ++ SEMAPHORE_BASIC_INFORMATION *out = info; ++ NTSTATUS ret; ++ ++ TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ semaphore = obj->shm; ++ ++ out->CurrentCount = semaphore->count; ++ out->MaximumCount = semaphore->max; ++ if (ret_len) *ret_len = sizeof(*out); ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, EVENT_TYPE event_type, BOOLEAN initial ) ++{ ++ enum esync_type type = (event_type == SynchronizationEvent ? ESYNC_AUTO_EVENT : ESYNC_MANUAL_EVENT); ++ ++ TRACE("name %s, %s-reset, initial %d.\n", ++ attr ? debugstr_us(attr->ObjectName) : "", ++ event_type == NotificationEvent ? "manual" : "auto", initial); ++ ++ return create_esync( type, handle, access, attr, initial, 0 ); ++} ++ ++NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ) ++{ ++ TRACE("name %s.\n", debugstr_us(attr->ObjectName)); ++ ++ return open_esync( ESYNC_AUTO_EVENT, handle, access, attr ); /* doesn't matter which */ ++} ++ ++static inline void small_pause(void) ++{ ++#ifdef __i386__ ++ __asm__ __volatile__( "rep;nop" : : : "memory" ); ++#else ++ __asm__ __volatile__( "" : : : "memory" ); ++#endif ++} ++ ++/* Manual-reset events are actually racier than other objects in terms of shm ++ * state. With other objects, races don't matter, because we only treat the shm ++ * state as a hint that lets us skip poll()—we still have to read(). But with ++ * manual-reset events we don't, which means that the shm state can be out of ++ * sync with the actual state. ++ * ++ * In general we shouldn't have to worry about races between modifying the ++ * event and waiting on it. If the state changes while we're waiting, it's ++ * equally plausible that we caught it before or after the state changed. ++ * However, we can have races between SetEvent() and ResetEvent(), so that the ++ * event has inconsistent internal state. 
++ * ++ * To solve this we have to use the other field to lock the event. Currently ++ * this is implemented as a spinlock, but I'm not sure if a futex might be ++ * better. I'm also not sure if it's possible to obviate locking by arranging ++ * writes and reads in a certain way. ++ * ++ * Note that we don't have to worry about locking in esync_wait_objects(). ++ * There's only two general patterns: ++ * ++ * WaitFor() SetEvent() ++ * ------------------------- ++ * read() ++ * signaled = 0 ++ * signaled = 1 ++ * write() ++ * ------------------------- ++ * read() ++ * signaled = 1 ++ * signaled = 0 ++ * ++ * ------------------------- ++ * ++ * That is, if SetEvent() tries to signal the event before WaitFor() resets its ++ * signaled state, it won't bother trying to write(), and then the signaled ++ * state will be reset, so the result is a consistent non-signaled event. ++ * There's several variations to this pattern but all of them are protected in ++ * the same way. Note however this is why we have to use interlocked_xchg() ++ * event inside of the lock. ++ */ ++ ++/* Removing this spinlock is harder than it looks. esync_wait_objects() can ++ * deal with inconsistent state well enough, and a race between SetEvent() and ++ * ResetEvent() gives us license to yield either result as long as we act ++ * consistently, but that's not enough. Notably, esync_wait_objects() should ++ * probably act like a fence, so that the second half of esync_set_event() does ++ * not seep past a subsequent reset. That's one problem, but no guarantee there ++ * aren't others. */ ++ ++NTSTATUS esync_set_event( HANDLE handle ) ++{ ++ static const uint64_t value = 1; ++ struct esync *obj; ++ struct event *event; ++ NTSTATUS ret; ++ ++ TRACE("%p.\n", handle); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ event = obj->shm; ++ ++ if (obj->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Acquire the spinlock. */ ++ while (InterlockedCompareExchange( &event->locked, 1, 0 )) ++ small_pause(); ++ } ++ ++ /* For manual-reset events, as long as we're in a lock, we can take the ++ * optimization of only calling write() if the event wasn't already ++ * signaled. ++ * ++ * For auto-reset events, esync_wait_objects() must grab the kernel object. ++ * Thus if we got into a race so that the shm state is signaled but the ++ * eventfd is unsignaled (i.e. reset shm, set shm, set fd, reset fd), we ++ * *must* signal the fd now, or any waiting threads will never wake up. */ ++ ++ if (!InterlockedExchange( &event->signaled, 1 ) || obj->type == ESYNC_AUTO_EVENT) ++ { ++ if (write( obj->fd, &value, sizeof(value) ) == -1) ++ ERR("write: %s\n", strerror(errno)); ++ } ++ ++ if (obj->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Release the spinlock. */ ++ event->locked = 0; ++ } ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_reset_event( HANDLE handle ) ++{ ++ uint64_t value; ++ struct esync *obj; ++ struct event *event; ++ NTSTATUS ret; ++ ++ TRACE("%p.\n", handle); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ event = obj->shm; ++ ++ if (obj->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Acquire the spinlock. */ ++ while (InterlockedCompareExchange( &event->locked, 1, 0 )) ++ small_pause(); ++ } ++ ++ /* For manual-reset events, as long as we're in a lock, we can take the ++ * optimization of only calling read() if the event was already signaled. ++ * ++ * For auto-reset events, we have no guarantee that the previous "signaled" ++ * state is actually correct. 
We need to leave both states unsignaled after ++ * leaving this function, so we always have to read(). */ ++ if (InterlockedExchange( &event->signaled, 0 ) || obj->type == ESYNC_AUTO_EVENT) ++ { ++ if (read( obj->fd, &value, sizeof(value) ) == -1 && errno != EWOULDBLOCK && errno != EAGAIN) ++ { ++ ERR("read: %s\n", strerror(errno)); ++ } ++ } ++ ++ if (obj->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Release the spinlock. */ ++ event->locked = 0; ++ } ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_pulse_event( HANDLE handle ) ++{ ++ uint64_t value = 1; ++ struct esync *obj; ++ NTSTATUS ret; ++ ++ TRACE("%p.\n", handle); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ ++ /* This isn't really correct; an application could miss the write. ++ * Unfortunately we can't really do much better. Fortunately this is rarely ++ * used (and publicly deprecated). */ ++ if (write( obj->fd, &value, sizeof(value) ) == -1) ++ return errno_to_status( errno ); ++ ++ /* Try to give other threads a chance to wake up. Hopefully erring on this ++ * side is the better thing to do... */ ++ usleep(0); ++ ++ read( obj->fd, &value, sizeof(value) ); ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ) ++{ ++ struct esync *obj; ++ EVENT_BASIC_INFORMATION *out = info; ++ struct pollfd fd; ++ NTSTATUS ret; ++ ++ TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ ++ fd.fd = obj->fd; ++ fd.events = POLLIN; ++ out->EventState = poll( &fd, 1, 0 ); ++ out->EventType = (obj->type == ESYNC_AUTO_EVENT ? SynchronizationEvent : NotificationEvent); ++ if (ret_len) *ret_len = sizeof(*out); ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ) ++{ ++ TRACE("name %s, initial %d.\n", ++ attr ? debugstr_us(attr->ObjectName) : "", initial); ++ ++ return create_esync( ESYNC_MUTEX, handle, access, attr, initial ? 0 : 1, 0 ); ++} ++ ++NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ) ++{ ++ TRACE("name %s.\n", debugstr_us(attr->ObjectName)); ++ ++ return open_esync( ESYNC_MUTEX, handle, access, attr ); ++} ++ ++NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ) ++{ ++ struct esync *obj; ++ struct mutex *mutex; ++ static const uint64_t value = 1; ++ NTSTATUS ret; ++ ++ TRACE("%p, %p.\n", handle, prev); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ mutex = obj->shm; ++ ++ /* This is thread-safe, because the only thread that can change the tid to ++ * or from our tid is ours. */ ++ if (mutex->tid != GetCurrentThreadId()) return STATUS_MUTANT_NOT_OWNED; ++ ++ if (prev) *prev = mutex->count; ++ ++ mutex->count--; ++ ++ if (!mutex->count) ++ { ++ /* This is also thread-safe, as long as signaling the file is the last ++ * thing we do. Other threads don't care about the tid if it isn't ++ * theirs. 
*/ ++ mutex->tid = 0; ++ ++ if (write( obj->fd, &value, sizeof(value) ) == -1) ++ return errno_to_status( errno ); ++ } ++ ++ return STATUS_SUCCESS; ++} ++ ++NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ) ++{ ++ struct esync *obj; ++ struct mutex *mutex; ++ MUTANT_BASIC_INFORMATION *out = info; ++ NTSTATUS ret; ++ ++ TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); ++ ++ if ((ret = get_object( handle, &obj ))) return ret; ++ mutex = obj->shm; ++ ++ out->CurrentCount = 1 - mutex->count; ++ out->OwnedByCaller = (mutex->tid == GetCurrentThreadId()); ++ out->AbandonedState = (mutex->tid == ~0); ++ if (ret_len) *ret_len = sizeof(*out); ++ ++ return STATUS_SUCCESS; ++} ++ ++#define TICKSPERSEC 10000000 ++#define TICKSPERMSEC 10000 ++ ++static LONGLONG update_timeout( ULONGLONG end ) ++{ ++ LARGE_INTEGER now; ++ LONGLONG timeleft; ++ ++ NtQuerySystemTime( &now ); ++ timeleft = end - now.QuadPart; ++ if (timeleft < 0) timeleft = 0; ++ return timeleft; ++} ++ ++static int do_poll( struct pollfd *fds, nfds_t nfds, ULONGLONG *end ) ++{ ++ int ret; ++ ++ do ++ { ++ if (end) ++ { ++ LONGLONG timeleft = update_timeout( *end ); ++ ++#ifdef HAVE_PPOLL ++ /* We use ppoll() if available since the time granularity is better. */ ++ struct timespec tmo_p; ++ tmo_p.tv_sec = timeleft / (ULONGLONG)TICKSPERSEC; ++ tmo_p.tv_nsec = (timeleft % TICKSPERSEC) * 100; ++ ret = ppoll( fds, nfds, &tmo_p, NULL ); ++#else ++ ret = poll( fds, nfds, timeleft / TICKSPERMSEC ); ++#endif ++ } ++ else ++ ret = poll( fds, nfds, -1 ); ++ ++ /* If we receive EINTR we were probably suspended (SIGUSR1), possibly for a ++ * system APC. The right thing to do is just try again. */ ++ } while (ret < 0 && errno == EINTR); ++ ++ return ret; ++} ++ ++/* Return TRUE if abandoned. */ ++static BOOL update_grabbed_object( struct esync *obj ) ++{ ++ BOOL ret = FALSE; ++ ++ if (obj->type == ESYNC_MUTEX) ++ { ++ struct mutex *mutex = obj->shm; ++ /* We don't have to worry about a race between this and read(); the ++ * fact that we grabbed it means the count is now zero, so nobody else ++ * can (and the only thread that can release it is us). */ ++ if (mutex->tid == ~0) ++ ret = TRUE; ++ mutex->tid = GetCurrentThreadId(); ++ mutex->count++; ++ } ++ else if (obj->type == ESYNC_SEMAPHORE) ++ { ++ struct semaphore *semaphore = obj->shm; ++ /* We don't have to worry about a race between this and read(); the ++ * fact that we were able to grab it at all means the count is nonzero, ++ * and if someone else grabbed it then the count must have been >= 2, ++ * etc. */ ++ InterlockedExchangeAdd( &semaphore->count, -1 ); ++ } ++ else if (obj->type == ESYNC_AUTO_EVENT) ++ { ++ struct event *event = obj->shm; ++ /* We don't have to worry about a race between this and read(), since ++ * this is just a hint, and the real state is in the kernel object. ++ * This might already be 0, but that's okay! */ ++ event->signaled = 0; ++ } ++ ++ return ret; ++} ++ ++/* A value of STATUS_NOT_IMPLEMENTED returned from this function means that we ++ * need to delegate to server_select(). 
*/ ++static NTSTATUS __esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ static const LARGE_INTEGER zero; ++ ++ struct esync *objs[MAXIMUM_WAIT_OBJECTS]; ++ struct pollfd fds[MAXIMUM_WAIT_OBJECTS + 1]; ++ int has_esync = 0, has_server = 0; ++ BOOL msgwait = FALSE; ++ LONGLONG timeleft; ++ LARGE_INTEGER now; ++ DWORD pollcount; ++ ULONGLONG end; ++ int64_t value; ++ ssize_t size; ++ int i, j, ret; ++ ++ /* Grab the APC fd if we don't already have it. */ ++ if (alertable && ntdll_get_thread_data()->esync_apc_fd == -1) ++ { ++ obj_handle_t fd_handle; ++ sigset_t sigset; ++ int fd = -1; ++ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ SERVER_START_REQ( get_esync_apc_fd ) ++ { ++ if (!(ret = wine_server_call( req ))) ++ { ++ fd = receive_fd( &fd_handle ); ++ assert( fd_handle == GetCurrentThreadId() ); ++ } ++ } ++ SERVER_END_REQ; ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ ntdll_get_thread_data()->esync_apc_fd = fd; ++ } ++ ++ NtQuerySystemTime( &now ); ++ if (timeout) ++ { ++ if (timeout->QuadPart == TIMEOUT_INFINITE) ++ timeout = NULL; ++ else if (timeout->QuadPart >= 0) ++ end = timeout->QuadPart; ++ else ++ end = now.QuadPart - timeout->QuadPart; ++ } ++ ++ for (i = 0; i < count; i++) ++ { ++ ret = get_object( handles[i], &objs[i] ); ++ if (ret == STATUS_SUCCESS) ++ has_esync = 1; ++ else if (ret == STATUS_NOT_IMPLEMENTED) ++ has_server = 1; ++ else ++ return ret; ++ } ++ ++ if (objs[count - 1] && objs[count - 1]->type == ESYNC_QUEUE) ++ msgwait = TRUE; ++ ++ if (has_esync && has_server) ++ FIXME("Can't wait on esync and server objects at the same time!\n"); ++ else if (has_server) ++ return STATUS_NOT_IMPLEMENTED; ++ ++ if (TRACE_ON(esync)) ++ { ++ TRACE("Waiting for %s of %d handles:", wait_any ? "any" : "all", count); ++ for (i = 0; i < count; i++) ++ TRACE(" %p", handles[i]); ++ ++ if (msgwait) ++ TRACE(" or driver events"); ++ if (alertable) ++ TRACE(", alertable"); ++ ++ if (!timeout) ++ TRACE(", timeout = INFINITE.\n"); ++ else ++ { ++ timeleft = update_timeout( end ); ++ TRACE(", timeout = %ld.%07ld sec.\n", ++ (long) timeleft / TICKSPERSEC, (long) timeleft % TICKSPERSEC); ++ } ++ } ++ ++ if (wait_any || count == 1) ++ { ++ /* Try to check objects now, so we can obviate poll() at least. 
*/ ++ for (i = 0; i < count; i++) ++ { ++ struct esync *obj = objs[i]; ++ ++ if (obj) ++ { ++ switch (obj->type) ++ { ++ case ESYNC_MUTEX: ++ { ++ struct mutex *mutex = obj->shm; ++ ++ if (mutex->tid == GetCurrentThreadId()) ++ { ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ mutex->count++; ++ return i; ++ } ++ else if (!mutex->count) ++ { ++ if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) ++ { ++ if (mutex->tid == ~0) ++ { ++ TRACE("Woken up by abandoned mutex %p [%d].\n", handles[i], i); ++ i += STATUS_ABANDONED_WAIT_0; ++ } ++ else ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ mutex->tid = GetCurrentThreadId(); ++ mutex->count++; ++ return i; ++ } ++ } ++ break; ++ } ++ case ESYNC_SEMAPHORE: ++ { ++ struct semaphore *semaphore = obj->shm; ++ ++ if (semaphore->count) ++ { ++ if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) ++ { ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ InterlockedDecrement( &semaphore->count ); ++ return i; ++ } ++ } ++ break; ++ } ++ case ESYNC_AUTO_EVENT: ++ { ++ struct event *event = obj->shm; ++ ++ if (event->signaled) ++ { ++ if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) ++ { ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ event->signaled = 0; ++ return i; ++ } ++ } ++ break; ++ } ++ case ESYNC_MANUAL_EVENT: ++ { ++ struct event *event = obj->shm; ++ ++ if (event->signaled) ++ { ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ return i; ++ } ++ break; ++ } ++ case ESYNC_AUTO_SERVER: ++ case ESYNC_MANUAL_SERVER: ++ case ESYNC_QUEUE: ++ /* We can't wait on any of these. Fortunately I don't think ++ * they'll ever be uncontended anyway (at least, they won't be ++ * performance-critical). */ ++ break; ++ } ++ } ++ ++ fds[i].fd = obj ? obj->fd : -1; ++ fds[i].events = POLLIN; ++ } ++ if (alertable) ++ { ++ fds[i].fd = ntdll_get_thread_data()->esync_apc_fd; ++ fds[i].events = POLLIN; ++ i++; ++ } ++ pollcount = i; ++ ++ while (1) ++ { ++ ret = do_poll( fds, pollcount, timeout ? &end : NULL ); ++ if (ret > 0) ++ { ++ /* We must check this first! The server may set an event that ++ * we're waiting on, but we need to return STATUS_USER_APC. */ ++ if (alertable) ++ { ++ if (fds[pollcount - 1].revents & POLLIN) ++ goto userapc; ++ } ++ ++ /* Find out which object triggered the wait. */ ++ for (i = 0; i < count; i++) ++ { ++ struct esync *obj = objs[i]; ++ ++ if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) ++ { ++ ERR("Polling on fd %d returned %#x.\n", fds[i].fd, fds[i].revents); ++ return STATUS_INVALID_HANDLE; ++ } ++ ++ if (obj) ++ { ++ if (obj->type == ESYNC_MANUAL_EVENT ++ || obj->type == ESYNC_MANUAL_SERVER ++ || obj->type == ESYNC_QUEUE) ++ { ++ /* Don't grab the object, just check if it's signaled. */ ++ if (fds[i].revents & POLLIN) ++ { ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ return i; ++ } ++ } ++ else ++ { ++ if ((size = read( fds[i].fd, &value, sizeof(value) )) == sizeof(value)) ++ { ++ /* We found our object. */ ++ TRACE("Woken up by handle %p [%d].\n", handles[i], i); ++ if (update_grabbed_object( obj )) ++ return STATUS_ABANDONED_WAIT_0 + i; ++ return i; ++ } ++ } ++ } ++ } ++ ++ /* If we got here, someone else stole (or reset, etc.) whatever ++ * we were waiting for. So keep waiting. */ ++ NtQuerySystemTime( &now ); ++ } ++ else ++ goto err; ++ } ++ } ++ else ++ { ++ /* Wait-all is a little trickier to implement correctly. Fortunately, ++ * it's not as common. 
++ *
++ * The idea is basically just to wait in sequence on every object in the
++ * set. Then when we're done, try to grab them all in a tight loop. If
++ * that fails, release any resources we've grabbed (and yes, we can
++ * reliably do this—it's just mutexes and semaphores that we have to
++ * put back, and in both cases we just put back 1), and if any of that
++ * fails we start over.
++ *
++ * What makes this inherently bad is that we might temporarily grab a
++ * resource incorrectly. Hopefully it'll be quick (and hey, it won't
++ * block on wineserver) so nobody will notice. Besides, consider: if
++ * object A becomes signaled but someone grabs it before we can grab it
++ * and everything else, then they could just as well have grabbed it
++ * before it became signaled. Similarly if object A was signaled and we
++ * were blocking on object B, then B becomes available and someone grabs
++ * A before we can, then they might have grabbed A before B became
++ * signaled. In either case anyone who tries to wait on A or B will be
++ * waiting for an instant while we put things back. */
++
++        while (1)
++        {
++tryagain:
++            /* First step: try to poll on each object in sequence. */
++            fds[0].events = POLLIN;
++            pollcount = 1;
++            if (alertable)
++            {
++                /* We also need to wait on APCs. */
++                fds[1].fd = ntdll_get_thread_data()->esync_apc_fd;
++                fds[1].events = POLLIN;
++                pollcount++;
++            }
++            for (i = 0; i < count; i++)
++            {
++                struct esync *obj = objs[i];
++
++                fds[0].fd = obj ? obj->fd : -1;
++
++                if (obj && obj->type == ESYNC_MUTEX)
++                {
++                    /* It might be ours. */
++                    struct mutex *mutex = obj->shm;
++
++                    if (mutex->tid == GetCurrentThreadId())
++                        continue;
++                }
++
++                ret = do_poll( fds, pollcount, timeout ? &end : NULL );
++                if (ret <= 0)
++                    goto err;
++                else if (alertable && (fds[1].revents & POLLIN))
++                    goto userapc;
++
++                if (fds[0].revents & (POLLHUP | POLLERR | POLLNVAL))
++                {
++                    ERR("Polling on fd %d returned %#x.\n", fds[0].fd, fds[0].revents);
++                    return STATUS_INVALID_HANDLE;
++                }
++            }
++
++            /* If we got here and we haven't timed out, that means all of the
++             * handles were signaled. Check to make sure they still are. */
++            for (i = 0; i < count; i++)
++            {
++                fds[i].fd = objs[i] ? objs[i]->fd : -1;
++                fds[i].events = POLLIN;
++            }
++            /* There's no reason to check for APCs here. */
++            pollcount = i;
++
++            /* Poll everything to see if they're still signaled. */
++            ret = poll( fds, pollcount, 0 );
++            if (ret == pollcount)
++            {
++                BOOL abandoned = FALSE;
++
++                /* Quick, grab everything. */
++                for (i = 0; i < count; i++)
++                {
++                    struct esync *obj = objs[i];
++
++                    switch (obj->type)
++                    {
++                    case ESYNC_MUTEX:
++                    {
++                        struct mutex *mutex = obj->shm;
++                        if (mutex->tid == GetCurrentThreadId())
++                            break;
++                        /* otherwise fall through */
++                    }
++                    case ESYNC_SEMAPHORE:
++                    case ESYNC_AUTO_EVENT:
++                        if ((size = read( fds[i].fd, &value, sizeof(value) )) != sizeof(value))
++                        {
++                            /* We were too slow. Put everything back. */
++                            value = 1;
++                            for (j = i; j >= 0; j--)
++                            {
++                                if (write( obj->fd, &value, sizeof(value) ) == -1)
++                                    return errno_to_status( errno );
++                            }
++
++                            goto tryagain;  /* break out of two loops and a switch */
++                        }
++                        break;
++                    default:
++                        /* If a manual-reset event changed between there and
++                         * here, it shouldn't be a problem. */
++                        break;
++                    }
++                }
++
++                /* If we got here, we successfully waited on every object. */
++                /* Make sure to let ourselves know that we grabbed the mutexes
++                 * and semaphores. */
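
As an aside before the bookkeeping loop resumes below: the put-everything-back
step above is easier to see in isolation. Here is a minimal sketch of the same
idea with plain eventfds standing in for the objects; try_grab_all() and its
arguments are hypothetical names, not part of the patch, and the rollback
simply writes one token back per descriptor already consumed.

    #include <stdint.h>
    #include <unistd.h>

    /* Sketch: take one token from every descriptor, or put back everything
     * taken so far so the caller can go back to polling. Error handling is
     * omitted for brevity. */
    static int try_grab_all( const int *fds, int count )
    {
        uint64_t value;
        int i, j;

        for (i = 0; i < count; i++)
        {
            if (read( fds[i], &value, sizeof(value) ) != sizeof(value))
            {
                value = 1;
                for (j = i - 1; j >= 0; j--) /* roll back prior grabs */
                    write( fds[j], &value, sizeof(value) );
                return 0; /* lost a race; caller retries */
            }
        }
        return 1; /* grabbed all of them */
    }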
++                for (i = 0; i < count; i++)
++                    abandoned |= update_grabbed_object( objs[i] );
++
++                if (abandoned)
++                {
++                    TRACE("Wait successful, but some object(s) were abandoned.\n");
++                    return STATUS_ABANDONED;
++                }
++                TRACE("Wait successful.\n");
++                return STATUS_SUCCESS;
++            }
++
++            /* If we got here, ppoll() returned less than all of our objects.
++             * So loop back to the beginning and try again. */
++        } /* while(1) */
++    } /* else (wait-all) */
++
++err:
++    /* We should only get here if poll() failed. */
++
++    if (ret == 0)
++    {
++        TRACE("Wait timed out.\n");
++        return STATUS_TIMEOUT;
++    }
++    else
++    {
++        ERR("ppoll failed: %s\n", strerror(errno));
++        return errno_to_status( errno );
++    }
++
++userapc:
++    TRACE("Woken up by user APC.\n");
++
++    /* We have to make a server call anyway to get the APC to execute, so just
++     * delegate down to server_select(). */
++    ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &zero );
++
++    /* This can happen if we received a system APC, and the APC fd was woken up
++     * before we got SIGUSR1. poll() doesn't return EINTR in that case. The
++     * right thing to do seems to be to return STATUS_USER_APC anyway. */
++    if (ret == STATUS_TIMEOUT) ret = STATUS_USER_APC;
++    return ret;
++}
++
++/* We need to let the server know when we are doing a message wait, and when we
++ * are done with one, so that all of the code surrounding hung queues works.
++ * We also need this for WaitForInputIdle(). */
++static void server_set_msgwait( int in_msgwait )
++{
++    SERVER_START_REQ( esync_msgwait )
++    {
++        req->in_msgwait = in_msgwait;
++        wine_server_call( req );
++    }
++    SERVER_END_REQ;
++}
++
++/* This is a very thin wrapper around the proper implementation above. The
++ * purpose is to make sure the server knows when we are doing a message wait.
++ * This is separated into a wrapper function since there are at least a dozen
++ * exit paths from esync_wait_objects(). */
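
For reference, the wait-any machinery that all of the above builds on can be
demonstrated with nothing but eventfd and poll. The following self-contained
sketch (illustrative only, not part of the patch) creates two auto-reset
"events", signals one, polls both, and consumes the token with a nonblocking
read() exactly the way the fast path does:

    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t value = 1;
        struct pollfd fds[2];
        int i;

        /* Nonblocking, so a failed read() just means another waiter
         * consumed the token first. */
        fds[0].fd = eventfd( 0, EFD_CLOEXEC | EFD_NONBLOCK );
        fds[1].fd = eventfd( 0, EFD_CLOEXEC | EFD_NONBLOCK );
        fds[0].events = fds[1].events = POLLIN;

        /* "Set" the second event. */
        if (write( fds[1].fd, &value, sizeof(value) ) == -1)
            perror( "write" );

        if (poll( fds, 2, -1 ) > 0)
        {
            for (i = 0; i < 2; i++)
            {
                /* read() grabs the object atomically. */
                if ((fds[i].revents & POLLIN)
                        && read( fds[i].fd, &value, sizeof(value) ) == sizeof(value))
                    printf( "woken up by object %d\n", i );
            }
        }

        close( fds[0].fd );
        close( fds[1].fd );
        return 0;
    }

/* The wrapper itself: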
*/ ++NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ BOOL msgwait = FALSE; ++ struct esync *obj; ++ NTSTATUS ret; ++ ++ if (count && !get_object( handles[count - 1], &obj ) && obj->type == ESYNC_QUEUE) ++ { ++ msgwait = TRUE; ++ server_set_msgwait( 1 ); ++ } ++ ++ ret = __esync_wait_objects( count, handles, wait_any, alertable, timeout ); ++ ++ if (msgwait) ++ server_set_msgwait( 0 ); ++ ++ return ret; ++} ++ ++NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, ++ const LARGE_INTEGER *timeout ) ++{ ++ struct esync *obj; ++ NTSTATUS ret; ++ ++ if ((ret = get_object( signal, &obj ))) return ret; ++ ++ switch (obj->type) ++ { ++ case ESYNC_SEMAPHORE: ++ ret = esync_release_semaphore( signal, 1, NULL ); ++ break; ++ case ESYNC_AUTO_EVENT: ++ case ESYNC_MANUAL_EVENT: ++ ret = esync_set_event( signal ); ++ break; ++ case ESYNC_MUTEX: ++ ret = esync_release_mutex( signal, NULL ); ++ break; ++ default: ++ return STATUS_OBJECT_TYPE_MISMATCH; ++ } ++ if (ret) return ret; ++ ++ return esync_wait_objects( 1, &wait, TRUE, alertable, timeout ); ++} ++ ++void esync_init(void) ++{ ++ struct stat st; ++ ++ if (!do_esync()) ++ { ++ /* make sure the server isn't running with WINEESYNC */ ++ HANDLE handle; ++ NTSTATUS ret; ++ ++ ret = create_esync( 0, &handle, 0, NULL, 0, 0 ); ++ if (ret != STATUS_NOT_IMPLEMENTED) ++ { ++ ERR("Server is running with WINEESYNC but this process is not, please enable WINEESYNC or restart wineserver.\n"); ++ exit(1); ++ } ++ ++ return; ++ } ++ ++ if (stat( config_dir, &st ) == -1) ++ ERR("Cannot stat %s\n", config_dir); ++ ++ if (st.st_ino != (unsigned long)st.st_ino) ++ sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); ++ else ++ sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); ++ ++ if ((shm_fd = shm_open( shm_name, O_RDWR, 0644 )) == -1) ++ { ++ /* probably the server isn't running with WINEESYNC, tell the user and bail */ ++ if (errno == ENOENT) ++ ERR("Failed to open esync shared memory file; make sure no stale wineserver instances are running without WINEESYNC.\n"); ++ else ++ ERR("Failed to initialize shared memory: %s\n", strerror( errno )); ++ exit(1); ++ } ++ ++ pagesize = sysconf( _SC_PAGESIZE ); ++ ++ shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); ++ shm_addrs_size = 128; ++} +diff --git a/dlls/ntdll/unix/esync.h b/dlls/ntdll/unix/esync.h +new file mode 100644 +index 00000000000..188304f3be7 +--- /dev/null ++++ b/dlls/ntdll/unix/esync.h +@@ -0,0 +1,61 @@ ++/* ++ * eventfd-based synchronization objects ++ * ++ * Copyright (C) 2018 Zebediah Figura ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA ++ */ ++ ++extern int do_esync(void); ++extern void esync_init(void); ++extern NTSTATUS esync_close( HANDLE handle ); ++ ++extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max); ++extern NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ); ++extern NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ); ++extern NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ); ++ ++extern NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, EVENT_TYPE type, BOOLEAN initial ); ++extern NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ); ++extern NTSTATUS esync_pulse_event( HANDLE handle ); ++extern NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ); ++extern NTSTATUS esync_reset_event( HANDLE handle ); ++extern NTSTATUS esync_set_event( HANDLE handle ); ++ ++extern NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ); ++extern NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, ++ const OBJECT_ATTRIBUTES *attr ); ++extern NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ); ++extern NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ); ++ ++extern NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ); ++extern NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, ++ const LARGE_INTEGER *timeout ); ++ ++ ++/* We have to synchronize on the fd cache mutex so that our calls to receive_fd ++ * don't race with theirs. It looks weird, I know. ++ * ++ * If we weren't trying to avoid touching the code I'd rename the mutex to ++ * "server_fd_mutex" or something similar. 
*/ ++extern pthread_mutex_t fd_cache_mutex; ++ ++extern int receive_fd( obj_handle_t *handle ); +diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c +index 33859dabf41..19b73256ef2 100644 +--- a/dlls/ntdll/unix/loader.c ++++ b/dlls/ntdll/unix/loader.c +@@ -86,6 +86,7 @@ + #include "winioctl.h" + #include "winternl.h" + #include "unix_private.h" ++#include "esync.h" + #include "wine/list.h" + #include "wine/debug.h" + +@@ -1564,6 +1565,7 @@ static void start_main_thread(void) + signal_init_thread( teb ); + dbg_init(); + startup_info_size = server_init_process(); ++ esync_init(); + virtual_map_user_shared_data(); + init_cpu_info(); + syscall_dispatcher = signal_init_syscalls(); +diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c +index e12ed3a668a..34d4c80eee5 100644 +--- a/dlls/ntdll/unix/server.c ++++ b/dlls/ntdll/unix/server.c +@@ -94,6 +94,7 @@ + #include "wine/server.h" + #include "wine/debug.h" + #include "unix_private.h" ++#include "esync.h" + #include "ddk/wdm.h" + + WINE_DEFAULT_DEBUG_CHANNEL(server); +@@ -134,7 +135,7 @@ timeout_t server_start_time = 0; /* time of server startup */ + static int fd_socket = -1; /* socket to exchange file descriptors with the server */ + static int initial_cwd = -1; + static pid_t server_pid; +-static pthread_mutex_t fd_cache_mutex = PTHREAD_MUTEX_INITIALIZER; ++pthread_mutex_t fd_cache_mutex = PTHREAD_MUTEX_INITIALIZER; + + /* atomically exchange a 64-bit value */ + static inline LONG64 interlocked_xchg64( LONG64 *dest, LONG64 val ) +@@ -843,7 +844,7 @@ void CDECL wine_server_send_fd( int fd ) + * + * Receive a file descriptor passed from the server. + */ +-static int receive_fd( obj_handle_t *handle ) ++int receive_fd( obj_handle_t *handle ) + { + struct iovec vec; + struct msghdr msghdr; +@@ -1697,6 +1698,9 @@ NTSTATUS WINAPI NtClose( HANDLE handle ) + * retrieve it again */ + fd = remove_fd_from_cache( handle ); + ++ if (do_esync()) ++ esync_close( handle ); ++ + SERVER_START_REQ( close_handle ) + { + req->handle = wine_server_obj_handle( handle ); +diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c +index a13e53a..a180ed4 100644 +--- a/dlls/ntdll/unix/sync.c ++++ b/dlls/ntdll/unix/sync.c +@@ -72,6 +72,7 @@ + #include "wine/exception.h" + #include "wine/debug.h" + #include "unix_private.h" ++#include "esync.h" + + WINE_DEFAULT_DEBUG_CHANNEL(sync); + +@@ -325,6 +326,9 @@ NTSTATUS WINAPI NtCreateSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJ + if (max <= 0 || initial < 0 || initial > max) return STATUS_INVALID_PARAMETER; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + ++ if (do_esync()) ++ return esync_create_semaphore( handle, access, attr, initial, max ); ++ + SERVER_START_REQ( create_semaphore ) + { + req->access = access; +@@ -349,6 +353,10 @@ NTSTATUS WINAPI NtOpenSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJEC + unsigned int ret; + + *handle = 0; ++ ++ if (do_esync()) ++ return esync_open_semaphore( handle, access, attr ); ++ + if ((ret = validate_open_object_attributes( attr ))) return ret; + + SERVER_START_REQ( open_semaphore ) +@@ -385,6 +393,9 @@ NTSTATUS WINAPI NtQuerySemaphore( HANDLE handle, SEMAPHORE_INFORMATION_CLASS cla + + if (len != sizeof(SEMAPHORE_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + ++ if (do_esync()) ++ return esync_query_semaphore( handle, info, ret_len ); ++ + SERVER_START_REQ( query_semaphore ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -407,6 +418,9 @@ NTSTATUS WINAPI NtReleaseSemaphore( HANDLE handle, 
ULONG count, ULONG *previous + { + unsigned int ret; + ++ if (do_esync()) ++ return esync_release_semaphore( handle, count, previous ); ++ + SERVER_START_REQ( release_semaphore ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -433,6 +447,10 @@ NTSTATUS WINAPI NtCreateEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_ + + *handle = 0; + if (type != NotificationEvent && type != SynchronizationEvent) return STATUS_INVALID_PARAMETER; ++ ++ if (do_esync()) ++ return esync_create_event( handle, access, attr, type, state ); ++ + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_event ) +@@ -461,6 +479,9 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + ++ if (do_esync()) ++ return esync_open_event( handle, access, attr ); ++ + SERVER_START_REQ( open_event ) + { + req->access = access; +@@ -481,8 +502,12 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + */ + NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + { ++ /* This comment is a dummy to make sure this patch applies in the right place. */ + unsigned int ret; + ++ if (do_esync()) ++ return esync_set_event( handle ); ++ + SERVER_START_REQ( event_op ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -500,8 +525,12 @@ NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + */ + NTSTATUS WINAPI NtResetEvent( HANDLE handle, LONG *prev_state ) + { ++ /* This comment is a dummy to make sure this patch applies in the right place. */ + unsigned int ret; + ++ if (do_esync()) ++ return esync_reset_event( handle ); ++ + SERVER_START_REQ( event_op ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -531,6 +560,9 @@ NTSTATUS WINAPI NtPulseEvent( HANDLE handle, LONG *prev_state ) + { + unsigned int ret; + ++ if (do_esync()) ++ return esync_pulse_event( handle ); ++ + SERVER_START_REQ( event_op ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -563,6 +595,9 @@ NTSTATUS WINAPI NtQueryEvent( HANDLE handle, EVENT_INFORMATION_CLASS class, + + if (len != sizeof(EVENT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + ++ if (do_esync()) ++ return esync_query_event( handle, info, ret_len ); ++ + SERVER_START_REQ( query_event ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -589,6 +624,10 @@ NTSTATUS WINAPI NtCreateMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT + struct object_attributes *objattr; + + *handle = 0; ++ ++ if (do_esync()) ++ return esync_create_mutex( handle, access, attr, owned ); ++ + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_mutex ) +@@ -616,6 +655,9 @@ NTSTATUS WINAPI NtOpenMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT_A + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + ++ if (do_esync()) ++ return esync_open_mutex( handle, access, attr ); ++ + SERVER_START_REQ( open_mutex ) + { + req->access = access; +@@ -638,6 +680,9 @@ NTSTATUS WINAPI NtReleaseMutant( HANDLE handle, LONG *prev_count ) + { + unsigned int ret; + ++ if (do_esync()) ++ return esync_release_mutex( handle, prev_count ); ++ + SERVER_START_REQ( release_mutex ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -668,6 +713,9 @@ NTSTATUS WINAPI NtQueryMutant( HANDLE handle, MUTANT_INFORMATION_CLASS class, + + if (len != sizeof(MUTANT_BASIC_INFORMATION)) return 
STATUS_INFO_LENGTH_MISMATCH; + ++ if (do_esync()) ++ return esync_query_mutex( handle, info, ret_len ); ++ + SERVER_START_REQ( query_mutex ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -1437,6 +1485,13 @@ NTSTATUS WINAPI NtWaitForMultipleObjects( DWORD count, const HANDLE *handles, BO + + if (!count || count > MAXIMUM_WAIT_OBJECTS) return STATUS_INVALID_PARAMETER_1; + ++ if (do_esync()) ++ { ++ NTSTATUS ret = esync_wait_objects( count, handles, wait_any, alertable, timeout ); ++ if (ret != STATUS_NOT_IMPLEMENTED) ++ return ret; ++ } ++ + if (alertable) flags |= SELECT_ALERTABLE; + select_op.wait.op = wait_any ? SELECT_WAIT : SELECT_WAIT_ALL; + for (i = 0; i < count; i++) select_op.wait.handles[i] = wine_server_obj_handle( handles[i] ); +@@ -1462,6 +1517,9 @@ NTSTATUS WINAPI NtSignalAndWaitForSingleObject( HANDLE signal, HANDLE wait, + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + ++ if (do_esync()) ++ return esync_signal_and_wait( signal, wait, alertable, timeout ); ++ + if (!signal) return STATUS_INVALID_HANDLE; + + if (alertable) flags |= SELECT_ALERTABLE; +diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h +index 1ff56632496..e7033932bcb 100644 +--- a/dlls/ntdll/unix/unix_private.h ++++ b/dlls/ntdll/unix/unix_private.h +@@ -58,6 +58,7 @@ struct ntdll_thread_data + { + void *cpu_data[16]; /* reserved for CPU-specific data */ + void *kernel_stack; /* stack for thread startup and kernel syscalls */ ++ int esync_apc_fd; /* fd to wait on for user APCs */ + int request_fd; /* fd for sending server requests */ + int reply_fd; /* fd for receiving server replies */ + int wait_fd[2]; /* fd for sleeping server requests */ +diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c +index cac8ce4d368..bd89649e769 100644 +--- a/dlls/ntdll/unix/virtual.c ++++ b/dlls/ntdll/unix/virtual.c +@@ -2682,6 +2682,7 @@ static void init_teb( TEB *teb, PEB *peb ) + teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer; + teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer); + thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch; ++ thread_data->esync_apc_fd = -1; + thread_data->request_fd = -1; + thread_data->reply_fd = -1; + thread_data->wait_fd[0] = -1; +diff --git a/dlls/rpcrt4/rpc_server.c b/dlls/rpcrt4/rpc_server.c +index 12260b7298b..a7cad5e273f 100644 +--- a/dlls/rpcrt4/rpc_server.c ++++ b/dlls/rpcrt4/rpc_server.c +@@ -699,10 +699,6 @@ static DWORD CALLBACK RPCRT4_server_thread(LPVOID the_arg) + } + LeaveCriticalSection(&cps->cs); + +- EnterCriticalSection(&listen_cs); +- CloseHandle(cps->server_thread); +- cps->server_thread = NULL; +- LeaveCriticalSection(&listen_cs); + TRACE("done\n"); + return 0; + } +@@ -1570,7 +1566,10 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + LIST_FOR_EACH_ENTRY(protseq, &protseqs, RpcServerProtseq, entry) + { + if ((wait_thread = protseq->server_thread)) ++ { ++ protseq->server_thread = NULL; + break; ++ } + } + LeaveCriticalSection(&server_cs); + if (!wait_thread) +@@ -1579,6 +1578,7 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + TRACE("waiting for thread %lu\n", GetThreadId(wait_thread)); + LeaveCriticalSection(&listen_cs); + WaitForSingleObject(wait_thread, INFINITE); ++ CloseHandle(wait_thread); + EnterCriticalSection(&listen_cs); + } + if (listen_done_event == event) +diff --git a/include/config.h.in b/include/config.h.in +index 6ca4e1bc8c8..f315921dee8 100644 +--- a/include/config.h.in ++++ b/include/config.h.in +@@ -318,6 +318,9 @@ + /* Define to 1 if you 
have the 'posix_fallocate' function. */
+ #undef HAVE_POSIX_FALLOCATE
+
++/* Define to 1 if you have the 'ppoll' function. */
++#undef HAVE_PPOLL
++
+ /* Define to 1 if you have the 'prctl' function. */
+ #undef HAVE_PRCTL
+
+@@ -376,6 +376,9 @@
+ /* Define to 1 if 'interface_id' is a member of 'sg_io_hdr_t'. */
+ #undef HAVE_SG_IO_HDR_T_INTERFACE_ID
+
++/* Define to 1 if you have the 'shm_open' function. */
++#undef HAVE_SHM_OPEN
++
+ /* Define to 1 if 'si_fd' is a member of 'siginfo_t'. */
+ #undef HAVE_SIGINFO_T_SI_FD
+
+@@ -504,6 +504,9 @@
+ /* Define to 1 if you have the <sys/epoll.h> header file. */
+ #undef HAVE_SYS_EPOLL_H
+
++/* Define to 1 if you have the <sys/eventfd.h> header file. */
++#undef HAVE_SYS_EVENTFD_H
++
+ /* Define to 1 if you have the <sys/event.h> header file. */
+ #undef HAVE_SYS_EVENT_H
+
+diff --git a/server/Makefile.in b/server/Makefile.in
+index 9a695cefc30..8bd612b4728 100644
+--- a/server/Makefile.in
++++ b/server/Makefile.in
+@@ -11,6 +11,7 @@ C_SRCS = \
+ 	debugger.c \
+ 	device.c \
+ 	directory.c \
++	esync.c \
+ 	event.c \
+ 	fd.c \
+ 	file.c \
+diff --git a/server/async.c b/server/async.c
+index 24fed811da2..1b4f86a1b8b 100644
+--- a/server/async.c
++++ b/server/async.c
+@@ -70,6 +70,7 @@ static const struct object_ops async_ops =
+     add_queue,                 /* add_queue */
+     remove_queue,              /* remove_queue */
+     async_signaled,            /* signaled */
++    NULL,                      /* get_esync_fd */
+     async_satisfied,           /* satisfied */
+     no_signal,                 /* signal */
+     no_get_fd,                 /* get_fd */
+@@ -484,6 +485,7 @@ static const struct object_ops iosb_ops =
+     no_add_queue,              /* add_queue */
+     NULL,                      /* remove_queue */
+     NULL,                      /* signaled */
++    NULL,                      /* get_esync_fd */
+     NULL,                      /* satisfied */
+     no_signal,                 /* signal */
+     no_get_fd,                 /* get_fd */
+diff --git a/server/atom.c b/server/atom.c
+index b61fa276661..73d858fef82 100644
+--- a/server/atom.c
++++ b/server/atom.c
+@@ -80,6 +80,7 @@ static const struct object_ops atom_table_ops =
+     no_add_queue,              /* add_queue */
+     NULL,                      /* remove_queue */
+     NULL,                      /* signaled */
++    NULL,                      /* get_esync_fd */
+     NULL,                      /* satisfied */
+     no_signal,                 /* signal */
+     no_get_fd,                 /* get_fd */
+diff --git a/server/change.c b/server/change.c
+index 2cccc5d7a88..85afb0cbdc5 100644
+--- a/server/change.c
++++ b/server/change.c
+@@ -115,6 +115,7 @@ static const struct object_ops dir_ops =
+     add_queue,                 /* add_queue */
+     remove_queue,              /* remove_queue */
+     default_fd_signaled,       /* signaled */
++    default_fd_get_esync_fd,   /* get_esync_fd */
+     no_satisfied,              /* satisfied */
+     no_signal,                 /* signal */
+     dir_get_fd,                /* get_fd */
+diff --git a/server/clipboard.c b/server/clipboard.c
+index e4546832620..54a5fb683cc 100644
+--- a/server/clipboard.c
++++ b/server/clipboard.c
+@@ -77,6 +77,7 @@ static const struct object_ops clipboard_ops =
+     no_add_queue,              /* add_queue */
+     NULL,                      /* remove_queue */
+     NULL,                      /* signaled */
++    NULL,                      /* get_esync_fd */
+     NULL,                      /* satisfied */
+     no_signal,                 /* signal */
+     no_get_fd,                 /* get_fd */
+diff --git a/server/completion.c b/server/completion.c
+index c9b6995e0b2..ef66260c991 100644
+--- a/server/completion.c
++++ b/server/completion.c
+@@ -92,6 +92,7 @@ static const struct object_ops completion_wait_ops =
+     add_queue,                 /* add_queue */
+     remove_queue,              /* remove_queue */
+     completion_wait_signaled,  /* signaled */
++    NULL,                      /* get_esync_fd */
+     completion_wait_satisfied, /* satisfied */
+     no_signal,                 /* signal */
+     no_get_fd,                 /* get_fd */
+@@ -166,6 +167,7 @@ static const struct object_ops completion_ops =
+     add_queue,                 /* add_queue */
+     remove_queue,              /* remove_queue */
+     completion_signaled,       /* signaled */
++    NULL,                      /* get_esync_fd
*/ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/console.c b/server/console.c +index d1e3cae8919..78635f3fae1 100644 +--- a/server/console.c ++++ b/server/console.c +@@ -42,6 +42,7 @@ + #include "wincon.h" + #include "winternl.h" + #include "wine/condrv.h" ++#include "esync.h" + + struct screen_buffer; + +@@ -82,6 +82,7 @@ static const struct object_ops console_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_get_fd, /* get_fd */ +@@ -133,6 +135,7 @@ struct console_server + unsigned int once_input : 1; /* flag if input thread has already been requested */ + int term_fd; /* UNIX terminal fd */ + struct termios termios; /* original termios */ ++ int esync_fd; /* esync file descriptor */ + }; + + static void console_server_dump( struct object *obj, int verbose ); +@@ -142,6 +145,7 @@ static struct fd *console_server_get_fd( struct object *obj ); + static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name, unsigned int attr ); + static struct object *console_server_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ); + + static const struct object_ops console_server_ops = + { +@@ -151,6 +155,7 @@ static const struct object_ops console_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_server_signaled, /* signaled */ ++ console_server_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_server_get_fd, /* get_fd */ +@@ -218,6 +223,7 @@ static const struct object_ops screen_buffer_ops = + screen_buffer_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + screen_buffer_get_fd, /* get_fd */ +@@ -265,6 +271,7 @@ static const struct object_ops console_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -297,6 +301,7 @@ static const struct object_ops console_input_ops = + console_input_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_input_get_fd, /* get_fd */ +@@ -327,6 +332,7 @@ static const struct object_ops console_output_ops = + console_output_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_output_get_fd, /* get_fd */ +@@ -365,6 +371,7 @@ static const struct object_ops console_connection_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_connection_get_fd, /* get_fd */ +@@ -526,6 +529,8 @@ static void disconnect_console_server( struct console_server *server ) + list_remove( &call->entry ); + console_host_ioctl_terminate( call, STATUS_CANCELLED ); + } ++ if (do_esync()) ++ esync_clear( server->esync_fd ); + while (!list_empty( &server->read_queue )) + { + struct console_host_ioctl *call = LIST_ENTRY( 
list_head( &server->read_queue ), struct console_host_ioctl, entry ); +@@ -759,6 +767,13 @@ static int console_server_signaled( struct object *obj, struct wait_queue_entry + return !server->console || !list_empty( &server->queue ); + } + ++static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct console_server *server = (struct console_server*)obj; ++ *type = ESYNC_MANUAL_SERVER; ++ return server->esync_fd; ++} ++ + static struct fd *console_server_get_fd( struct object* obj ) + { + struct console_server *server = (struct console_server*)obj; +@@ -783,6 +798,7 @@ static struct object *create_console_server( void ) + list_init( &server->queue ); + list_init( &server->read_queue ); + server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT ); ++ server->esync_fd = -1; + if (!server->fd) + { + release_object( server ); +@@ -790,6 +806,10 @@ static struct object *create_console_server( void ) + return NULL; + } + allow_fd_caching(server->fd); ++ server->esync_fd = -1; ++ ++ if (do_esync()) ++ server->esync_fd = esync_create_fd( 0, 0 ); + + return &server->obj; + } +@@ -1388,6 +1404,8 @@ DECL_HANDLER(get_next_console_request) + /* set result of previous ioctl */ + ioctl = LIST_ENTRY( list_head( &server->queue ), struct console_host_ioctl, entry ); + list_remove( &ioctl->entry ); ++ if (do_esync() && list_empty( &server->queue )) ++ esync_clear( server->esync_fd ); + } + + if (ioctl) +@@ -1486,6 +1504,8 @@ DECL_HANDLER(get_next_console_request) + { + set_error( STATUS_PENDING ); + } ++ if (do_esync() && list_empty( &server->queue )) ++ esync_clear( server->esync_fd ); + + release_object( server ); + } +diff --git a/server/debugger.c b/server/debugger.c +index e4a6c1e43a8..c37f97aa0b6 100644 +--- a/server/debugger.c ++++ b/server/debugger.c +@@ -73,6 +73,7 @@ static const struct object_ops debug_event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + debug_event_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -101,6 +102,7 @@ static const struct object_ops debug_ctx_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + debug_obj_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/device.c b/server/device.c +index 652da83e1e2..b8ce131c732 100644 +--- a/server/device.c ++++ b/server/device.c +@@ -39,6 +39,7 @@ + #include "handle.h" + #include "request.h" + #include "process.h" ++#include "esync.h" + + /* IRP object */ + +@@ -68,6 +69,7 @@ static const struct object_ops irp_call_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -94,10 +96,12 @@ struct device_manager + struct list requests; /* list of pending irps across all devices */ + struct irp_call *current_call; /* call currently executed on client side */ + struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */ ++ int esync_fd; /* esync file descriptor */ + }; + + static void device_manager_dump( struct object *obj, int verbose ); + static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ); + static void device_manager_destroy( 
struct object *obj ); + + static const struct object_ops device_manager_ops = +@@ -108,6 +112,7 @@ static const struct object_ops device_manager_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + device_manager_signaled, /* signaled */ ++ device_manager_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -152,6 +157,7 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -203,6 +209,7 @@ static const struct object_ops device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + device_file_get_fd, /* get_fd */ +@@ -747,6 +754,9 @@ static void delete_file( struct device_file *file ) + /* terminate all pending requests */ + LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry ) + { ++ if (do_esync() && file->device->manager && list_empty( &file->device->manager->requests )) ++ esync_clear( file->device->manager->esync_fd ); ++ + list_remove( &irp->mgr_entry ); + set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 ); + } +@@ -782,6 +792,13 @@ static int device_manager_signaled( struct object *obj, struct wait_queue_entry + return !list_empty( &manager->requests ); + } + ++static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct device_manager *manager = (struct device_manager *)obj; ++ *type = ESYNC_MANUAL_SERVER; ++ return manager->esync_fd; ++} ++ + static void device_manager_destroy( struct object *obj ) + { + struct device_manager *manager = (struct device_manager *)obj; +@@ -816,6 +833,9 @@ static void device_manager_destroy( struct object *obj ) + assert( !irp->file && !irp->async ); + release_object( irp ); + } ++ ++ if (do_esync()) ++ close( manager->esync_fd ); + } + + static struct device_manager *create_device_manager(void) +@@ -828,6 +848,9 @@ static struct device_manager *create_device_manager(void) + list_init( &manager->devices ); + list_init( &manager->requests ); + wine_rb_init( &manager->kernel_objects, compare_kernel_object ); ++ ++ if (do_esync()) ++ manager->esync_fd = esync_create_fd( 0, 0 ); + } + return manager; + } +@@ -993,6 +1016,9 @@ DECL_HANDLER(get_next_device_request) + /* we already own the object if it's only on manager queue */ + if (irp->file) grab_object( irp ); + manager->current_call = irp; ++ ++ if (do_esync() && list_empty( &manager->requests )) ++ esync_clear( manager->esync_fd ); + } + else close_handle( current->process, reply->next ); + } +diff --git a/server/directory.c b/server/directory.c +index 9e3fef1177e..007ec1002ac 100644 +--- a/server/directory.c ++++ b/server/directory.c +@@ -58,6 +58,7 @@ static const struct object_ops object_type_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -97,6 +98,7 @@ static const struct object_ops directory_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/esync.c b/server/esync.c +new file mode 100644 +index 
00000000000..5c641fdbe01
+--- /dev/null
++++ b/server/esync.c
+@@ -0,0 +1,587 @@
++/*
++ * eventfd-based synchronization objects
++ *
++ * Copyright (C) 2018 Zebediah Figura
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
++ */
++
++#include "config.h"
++
++#include <fcntl.h>
++#include <stdio.h>
++#include <stdlib.h>
++#ifdef HAVE_SYS_EVENTFD_H
++# include <sys/eventfd.h>
++#endif
++#include <sys/mman.h>
++#ifdef HAVE_SYS_STAT_H
++# include <sys/stat.h>
++#endif
++#include <unistd.h>
++
++#include "ntstatus.h"
++#define WIN32_NO_STATUS
++#include "windef.h"
++#include "winternl.h"
++
++#include "handle.h"
++#include "request.h"
++#include "file.h"
++#include "esync.h"
++
++int do_esync(void)
++{
++#ifdef HAVE_SYS_EVENTFD_H
++    static int do_esync_cached = -1;
++
++    if (do_esync_cached == -1)
++        do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC"));
++
++    return do_esync_cached;
++#else
++    return 0;
++#endif
++}
++
++static char shm_name[29];
++static int shm_fd;
++static off_t shm_size;
++static void **shm_addrs;
++static int shm_addrs_size;  /* length of the allocated shm_addrs array */
++static long pagesize;
++
++static void shm_cleanup(void)
++{
++    close( shm_fd );
++    if (shm_unlink( shm_name ) == -1)
++        perror( "shm_unlink" );
++}
++
++void esync_init(void)
++{
++    struct stat st;
++
++    if (fstat( config_dir_fd, &st ) == -1)
++        fatal_error( "cannot stat config dir\n" );
++
++    if (st.st_ino != (unsigned long)st.st_ino)
++        sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino );
++    else
++        sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino );
++
++    shm_unlink( shm_name );
++
++    shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 );
++    if (shm_fd == -1)
++        perror( "shm_open" );
++
++    pagesize = sysconf( _SC_PAGESIZE );
++
++    shm_addrs = calloc( 128, sizeof(shm_addrs[0]) );
++    shm_addrs_size = 128;
++
++    shm_size = pagesize;
++    if (ftruncate( shm_fd, shm_size ) == -1)
++        perror( "ftruncate" );
++
++    fprintf( stderr, "esync: up and running.\n" );
++
++    atexit( shm_cleanup );
++}
++
++static struct list mutex_list = LIST_INIT(mutex_list);
++
++struct esync
++{
++    struct object obj;        /* object header */
++    int fd;                   /* eventfd file descriptor */
++    enum esync_type type;
++    unsigned int shm_idx;     /* index into the shared memory section */
++    struct list mutex_entry;  /* entry in the mutex list (if applicable) */
++};
++
++static void esync_dump( struct object *obj, int verbose );
++static int esync_get_esync_fd( struct object *obj, enum esync_type *type );
++static unsigned int esync_map_access( struct object *obj, unsigned int access );
++static void esync_destroy( struct object *obj );
++
++const struct object_ops esync_ops =
++{
++    sizeof(struct esync),      /* size */
++    &no_type,                  /* type */
++    esync_dump,                /* dump */
++    no_add_queue,              /* add_queue */
++    NULL,                      /* remove_queue */
++ NULL, /* signaled */ ++ esync_get_esync_fd, /* get_esync_fd */ ++ NULL, /* satisfied */ ++ no_signal, /* signal */ ++ no_get_fd, /* get_fd */ ++ esync_map_access, /* map_access */ ++ default_get_sd, /* get_sd */ ++ default_set_sd, /* set_sd */ ++ no_get_full_name, /* get_full_name */ ++ no_lookup_name, /* lookup_name */ ++ directory_link_name, /* link_name */ ++ default_unlink_name, /* unlink_name */ ++ no_open_file, /* open_file */ ++ no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_close_handle, /* close_handle */ ++ esync_destroy /* destroy */ ++}; ++ ++static void esync_dump( struct object *obj, int verbose ) ++{ ++ struct esync *esync = (struct esync *)obj; ++ assert( obj->ops == &esync_ops ); ++ fprintf( stderr, "esync fd=%ld\n", esync->fd ); ++} ++ ++static int esync_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct esync *esync = (struct esync *)obj; ++ *type = esync->type; ++ return esync->fd; ++} ++ ++static unsigned int esync_map_access( struct object *obj, unsigned int access ) ++{ ++ /* Sync objects have the same flags. */ ++ if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE; ++ if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE; ++ if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE; ++ if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE; ++ return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL); ++} ++ ++static void esync_destroy( struct object *obj ) ++{ ++ struct esync *esync = (struct esync *)obj; ++ if (esync->type == ESYNC_MUTEX) ++ list_remove( &esync->mutex_entry ); ++ close( esync->fd ); ++} ++ ++static int type_matches( enum esync_type type1, enum esync_type type2 ) ++{ ++ return (type1 == type2) || ++ ((type1 == ESYNC_AUTO_EVENT || type1 == ESYNC_MANUAL_EVENT) && ++ (type2 == ESYNC_AUTO_EVENT || type2 == ESYNC_MANUAL_EVENT)); ++} ++ ++static void *get_shm( unsigned int idx ) ++{ ++ int entry = (idx * 8) / pagesize; ++ int offset = (idx * 8) % pagesize; ++ ++ if (entry >= shm_addrs_size) ++ { ++ int new_size = max(shm_addrs_size * 2, entry + 1); ++ ++ if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) ++ fprintf( stderr, "esync: couldn't expand shm_addrs array to size %ld\n", entry + 1 ); ++ ++ memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); ++ ++ shm_addrs_size = new_size; ++ } ++ ++ if (!shm_addrs[entry]) ++ { ++ void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); ++ if (addr == (void *)-1) ++ { ++ fprintf( stderr, "esync: failed to map page %d (offset %#lx): ", entry, entry * pagesize ); ++ perror( "mmap" ); ++ } ++ ++ if (debug_level) ++ fprintf( stderr, "esync: Mapping page %d at %p.\n", entry, addr ); ++ ++ if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) ++ munmap( addr, pagesize ); /* someone beat us to it */ ++ } ++ ++ return (void *)((unsigned long)shm_addrs[entry] + offset); ++} ++ ++struct semaphore ++{ ++ int max; ++ int count; ++}; ++C_ASSERT(sizeof(struct semaphore) == 8); ++ ++struct mutex ++{ ++ DWORD tid; ++ int count; /* recursion count */ ++}; ++C_ASSERT(sizeof(struct mutex) == 8); ++ ++struct event ++{ ++ int signaled; ++ int locked; ++}; ++C_ASSERT(sizeof(struct event) == 8); ++ ++struct esync *create_esync( struct object *root, const struct unicode_str *name, ++ unsigned int attr, int initval, int max, enum esync_type type, ++ const 
struct security_descriptor *sd ) ++{ ++#ifdef HAVE_SYS_EVENTFD_H ++ struct esync *esync; ++ ++ if ((esync = create_named_object( root, &esync_ops, name, attr, sd ))) ++ { ++ if (get_error() != STATUS_OBJECT_NAME_EXISTS) ++ { ++ int flags = EFD_CLOEXEC | EFD_NONBLOCK; ++ ++ if (type == ESYNC_SEMAPHORE) ++ flags |= EFD_SEMAPHORE; ++ ++ /* initialize it if it didn't already exist */ ++ esync->fd = eventfd( initval, flags ); ++ if (esync->fd == -1) ++ { ++ perror( "eventfd" ); ++ file_set_error(); ++ release_object( esync ); ++ return NULL; ++ } ++ esync->type = type; ++ ++ /* Use the fd as index, since that'll be unique across all ++ * processes, but should hopefully end up also allowing reuse. */ ++ esync->shm_idx = esync->fd + 1; /* we keep index 0 reserved */ ++ while (esync->shm_idx * 8 >= shm_size) ++ { ++ /* Better expand the shm section. */ ++ shm_size += pagesize; ++ if (ftruncate( shm_fd, shm_size ) == -1) ++ { ++ fprintf( stderr, "esync: couldn't expand %s to size %ld: ", ++ shm_name, shm_size ); ++ perror( "ftruncate" ); ++ } ++ } ++ ++ /* Initialize the shared memory portion. We want to do this on the ++ * server side to avoid a potential though unlikely race whereby ++ * the same object is opened and used between the time it's created ++ * and the time its shared memory portion is initialized. */ ++ switch (type) ++ { ++ case ESYNC_SEMAPHORE: ++ { ++ struct semaphore *semaphore = get_shm( esync->shm_idx ); ++ semaphore->max = max; ++ semaphore->count = initval; ++ break; ++ } ++ case ESYNC_AUTO_EVENT: ++ case ESYNC_MANUAL_EVENT: ++ { ++ struct event *event = get_shm( esync->shm_idx ); ++ event->signaled = initval ? 1 : 0; ++ event->locked = 0; ++ break; ++ } ++ case ESYNC_MUTEX: ++ { ++ struct mutex *mutex = get_shm( esync->shm_idx ); ++ mutex->tid = initval ? 0 : current->id; ++ mutex->count = initval ? 0 : 1; ++ list_add_tail( &mutex_list, &esync->mutex_entry ); ++ break; ++ } ++ default: ++ assert( 0 ); ++ } ++ } ++ else ++ { ++ /* validate the type */ ++ if (!type_matches( type, esync->type )) ++ { ++ release_object( &esync->obj ); ++ set_error( STATUS_OBJECT_TYPE_MISMATCH ); ++ return NULL; ++ } ++ } ++ } ++ return esync; ++#else ++ /* FIXME: Provide a fallback implementation using pipe(). */ ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return NULL; ++#endif ++} ++ ++/* Create a file descriptor for an existing handle. ++ * Caller must close the handle when it's done; it's not linked to an esync ++ * server object in any way. */ ++int esync_create_fd( int initval, int flags ) ++{ ++#ifdef HAVE_SYS_EVENTFD_H ++ int fd; ++ ++ fd = eventfd( initval, flags | EFD_CLOEXEC | EFD_NONBLOCK ); ++ if (fd == -1) ++ perror( "eventfd" ); ++ ++ return fd; ++#else ++ return -1; ++#endif ++} ++ ++/* Wake up a specific fd. */ ++void esync_wake_fd( int fd ) ++{ ++ static const uint64_t value = 1; ++ ++ if (write( fd, &value, sizeof(value) ) == -1) ++ perror( "esync: write" ); ++} ++ ++/* Wake up a server-side esync object. */ ++void esync_wake_up( struct object *obj ) ++{ ++ enum esync_type dummy; ++ int fd; ++ ++ if (obj->ops->get_esync_fd) ++ { ++ fd = obj->ops->get_esync_fd( obj, &dummy ); ++ esync_wake_fd( fd ); ++ } ++} ++ ++void esync_clear( int fd ) ++{ ++ uint64_t value; ++ ++ /* we don't care about the return value */ ++ read( fd, &value, sizeof(value) ); ++} ++ ++static inline void small_pause(void) ++{ ++#ifdef __i386__ ++ __asm__ __volatile__( "rep;nop" : : : "memory" ); ++#else ++ __asm__ __volatile__( "" : : : "memory" ); ++#endif ++} ++ ++/* Server-side event support. 
*/ ++void esync_set_event( struct esync *esync ) ++{ ++ static const uint64_t value = 1; ++ struct event *event = get_shm( esync->shm_idx ); ++ ++ assert( esync->obj.ops == &esync_ops ); ++ assert( event != NULL ); ++ ++ if (debug_level) ++ fprintf( stderr, "esync_set_event() fd=%ld\n", esync->fd ); ++ ++ if (esync->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Acquire the spinlock. */ ++ while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) ++ small_pause(); ++ } ++ ++ if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) ++ { ++ if (write( esync->fd, &value, sizeof(value) ) == -1) ++ perror( "esync: write" ); ++ } ++ ++ if (esync->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Release the spinlock. */ ++ event->locked = 0; ++ } ++} ++ ++void esync_reset_event( struct esync *esync ) ++{ ++ static uint64_t value = 1; ++ struct event *event = get_shm( esync->shm_idx ); ++ ++ assert( esync->obj.ops == &esync_ops ); ++ assert( event != NULL ); ++ ++ if (debug_level) ++ fprintf( stderr, "esync_reset_event() fd=%ld\n", esync->fd ); ++ ++ if (esync->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Acquire the spinlock. */ ++ while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) ++ small_pause(); ++ } ++ ++ /* Only bother signaling the fd if we weren't already signaled. */ ++ if (__atomic_exchange_n( &event->signaled, 0, __ATOMIC_SEQ_CST )) ++ { ++ /* we don't care about the return value */ ++ read( esync->fd, &value, sizeof(value) ); ++ } ++ ++ if (esync->type == ESYNC_MANUAL_EVENT) ++ { ++ /* Release the spinlock. */ ++ event->locked = 0; ++ } ++} ++ ++void esync_abandon_mutexes( struct thread *thread ) ++{ ++ struct esync *esync; ++ ++ LIST_FOR_EACH_ENTRY( esync, &mutex_list, struct esync, mutex_entry ) ++ { ++ struct mutex *mutex = get_shm( esync->shm_idx ); ++ ++ if (mutex->tid == thread->id) ++ { ++ if (debug_level) ++ fprintf( stderr, "esync_abandon_mutexes() fd=%ld\n", esync->fd ); ++ mutex->tid = ~0; ++ mutex->count = 0; ++ esync_wake_fd( esync->fd ); ++ } ++ } ++} ++ ++DECL_HANDLER(create_esync) ++{ ++ struct esync *esync; ++ struct unicode_str name; ++ struct object *root; ++ const struct security_descriptor *sd; ++ const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root ); ++ ++ if (!do_esync()) ++ { ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return; ++ } ++ ++ if (!req->type) ++ { ++ set_error( STATUS_INVALID_PARAMETER ); ++ return; ++ } ++ ++ if (!objattr) return; ++ ++ if ((esync = create_esync( root, &name, objattr->attributes, req->initval, req->max, req->type, sd ))) ++ { ++ if (get_error() == STATUS_OBJECT_NAME_EXISTS) ++ reply->handle = alloc_handle( current->process, esync, req->access, objattr->attributes ); ++ else ++ reply->handle = alloc_handle_no_access_check( current->process, esync, ++ req->access, objattr->attributes ); ++ ++ reply->type = esync->type; ++ reply->shm_idx = esync->shm_idx; ++ send_client_fd( current->process, esync->fd, reply->handle ); ++ release_object( esync ); ++ } ++ ++ if (root) release_object( root ); ++} ++ ++DECL_HANDLER(open_esync) ++{ ++ struct unicode_str name = get_req_unicode_str(); ++ ++ reply->handle = open_object( current->process, req->rootdir, req->access, ++ &esync_ops, &name, req->attributes ); ++ ++ /* send over the fd */ ++ if (reply->handle) ++ { ++ struct esync *esync; ++ ++ if (!(esync = (struct esync *)get_handle_obj( current->process, reply->handle, ++ 0, &esync_ops ))) ++ return; ++ ++ if (!type_matches( req->type, esync->type )) ++ { ++ set_error( STATUS_OBJECT_TYPE_MISMATCH ); ++ 
release_object( esync ); ++ return; ++ } ++ ++ reply->type = esync->type; ++ reply->shm_idx = esync->shm_idx; ++ ++ send_client_fd( current->process, esync->fd, reply->handle ); ++ release_object( esync ); ++ } ++} ++ ++/* Retrieve a file descriptor for an esync object which will be signaled by the ++ * server. The client should only read from (i.e. wait on) this object. */ ++DECL_HANDLER(get_esync_fd) ++{ ++ struct object *obj; ++ enum esync_type type; ++ int fd; ++ ++ if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL ))) ++ return; ++ ++ if (obj->ops->get_esync_fd) ++ { ++ fd = obj->ops->get_esync_fd( obj, &type ); ++ reply->type = type; ++ if (obj->ops == &esync_ops) ++ { ++ struct esync *esync = (struct esync *)obj; ++ reply->shm_idx = esync->shm_idx; ++ } ++ else ++ reply->shm_idx = 0; ++ send_client_fd( current->process, fd, req->handle ); ++ } ++ else ++ { ++ if (debug_level) ++ { ++ fprintf( stderr, "%04x: esync: can't wait on object: ", current->id ); ++ obj->ops->dump( obj, 0 ); ++ } ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ } ++ ++ release_object( obj ); ++} ++ ++/* Return the fd used for waiting on user APCs. */ ++DECL_HANDLER(get_esync_apc_fd) ++{ ++ send_client_fd( current->process, current->esync_apc_fd, current->id ); ++} +diff --git a/server/esync.h b/server/esync.h +new file mode 100644 +index 00000000000..125da8e9d12 +--- /dev/null ++++ b/server/esync.h +@@ -0,0 +1,33 @@ ++/* ++ * eventfd-based synchronization objects ++ * ++ * Copyright (C) 2018 Zebediah Figura ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA ++ */ ++ ++extern int do_esync(void); ++void esync_init(void); ++int esync_create_fd( int initval, int flags ); ++void esync_wake_fd( int fd ); ++void esync_wake_up( struct object *obj ); ++void esync_clear( int fd ); ++ ++struct esync; ++ ++extern const struct object_ops esync_ops; ++void esync_set_event( struct esync *esync ); ++void esync_reset_event( struct esync *esync ); ++void esync_abandon_mutexes( struct thread *thread ); +diff --git a/server/event.c b/server/event.c +index 9d8af7c87ea..8607b494b6d 100644 +--- a/server/event.c ++++ b/server/event.c +@@ -35,6 +35,7 @@ + #include "thread.h" + #include "request.h" + #include "security.h" ++#include "esync.h" + + static const WCHAR event_name[] = {'E','v','e','n','t'}; + +@@ -42,13 +43,16 @@ struct event + struct list kernel_object; /* list of kernel object pointers */ + int manual_reset; /* is it a manual reset event? 
*/ + int signaled; /* event has been signaled */ ++ int esync_fd; /* esync file descriptor */ + }; + + static void event_dump( struct object *obj, int verbose ); + static int event_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ); ++static int event_get_esync_fd( struct object *obj, enum esync_type *type ); + static int event_signal( struct object *obj, unsigned int access); + static struct list *event_get_kernel_obj_list( struct object *obj ); ++static void event_destroy( struct object *obj ); + + static const struct object_ops event_ops = + { +@@ -60,6 +64,7 @@ static const struct object_ops event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + event_signaled, /* signaled */ ++ event_get_esync_fd, /* get_esync_fd */ + event_satisfied, /* satisfied */ + event_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -73,7 +78,7 @@ static const struct object_ops event_ops = + no_open_file, /* open_file */ + event_get_kernel_obj_list, /* get_kernel_obj_list */ + no_close_handle, /* close_handle */ +- no_destroy /* destroy */ ++ event_destroy /* destroy */ + }; + + +@@ -95,6 +100,7 @@ static const struct object_ops keyed_event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + keyed_event_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -126,6 +132,9 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + list_init( &event->kernel_object ); + event->manual_reset = manual_reset; + event->signaled = initial_state; ++ ++ if (do_esync()) ++ event->esync_fd = esync_create_fd( initial_state, 0 ); + } + } + return event; +@@ -133,6 +142,10 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + + struct event *get_event_obj( struct process *process, obj_handle_t handle, unsigned int access ) + { ++ struct object *obj; ++ if (do_esync() && (obj = get_handle_obj( process, handle, access, &esync_ops))) ++ return (struct event *)obj; /* even though it's not an event */ ++ + return (struct event *)get_handle_obj( process, handle, access, &event_ops ); + } + +@@ -146,6 +159,12 @@ void pulse_event( struct event *event ) + + void set_event( struct event *event ) + { ++ if (do_esync() && event->obj.ops == &esync_ops) ++ { ++ esync_set_event( (struct esync *)event ); ++ return; ++ } ++ + event->signaled = 1; + /* wake up all waiters if manual reset, a single one otherwise */ + wake_up( &event->obj, !event->manual_reset ); +@@ -153,7 +172,15 @@ void set_event( struct event *event ) + + void reset_event( struct event *event ) + { ++ if (do_esync() && event->obj.ops == &esync_ops) ++ { ++ esync_reset_event( (struct esync *)event ); ++ return; ++ } + event->signaled = 0; ++ ++ if (do_esync()) ++ esync_clear( event->esync_fd ); + } + + static void event_dump( struct object *obj, int verbose ) +@@ -177,6 +204,13 @@ static int event_signaled( struct object *obj, struct wait_queue_entry *entry ) + return event->signaled; + } + ++static int event_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct event *event = (struct event *)obj; ++ *type = event->manual_reset ? 
ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; ++ return event->esync_fd; ++} ++ + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct event *event = (struct event *)obj; +@@ -214,6 +248,14 @@ static struct list *event_get_kernel_obj_list( struct object *obj ) + return &event->kernel_object; + } + ++static void event_destroy( struct object *obj ) ++{ ++ struct event *event = (struct event *)obj; ++ ++ if (do_esync()) ++ close( event->esync_fd ); ++} ++ + struct keyed_event *create_keyed_event( struct object *root, const struct unicode_str *name, + unsigned int attr, const struct security_descriptor *sd ) + { +diff --git a/server/fd.c b/server/fd.c +index e5a4be7a3df..bbc2462163d 100644 +--- a/server/fd.c ++++ b/server/fd.c +@@ -102,6 +102,7 @@ + #include "handle.h" + #include "process.h" + #include "request.h" ++#include "esync.h" + + #include "winternl.h" + #include "winioctl.h" +@@ -203,6 +204,7 @@ struct fd + struct completion *completion; /* completion object attached to this fd */ + apc_param_t comp_key; /* completion key to set in completion events */ + unsigned int comp_flags; /* completion flags */ ++ int esync_fd; /* esync file descriptor */ + }; + + static void fd_dump( struct object *obj, int verbose ); +@@ -216,6 +218,7 @@ static const struct object_ops fd_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -257,6 +260,7 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -297,6 +301,7 @@ static const struct object_ops inode_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -339,6 +344,7 @@ static const struct object_ops file_lock_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + file_lock_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -1589,6 +1595,9 @@ static void fd_destroy( struct object *obj ) + free( fd->unlink_name ); + free( fd->unix_name ); + } ++ ++ if (do_esync()) ++ close( fd->esync_fd ); + } + + /* check if the desired access is possible without violating */ +@@ -1704,12 +1713,16 @@ static struct fd *alloc_fd_object(void) + fd->poll_index = -1; + fd->completion = NULL; + fd->comp_flags = 0; ++ fd->esync_fd = -1; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); + ++ if (do_esync()) ++ fd->esync_fd = esync_create_fd( 1, 0 ); ++ + if ((fd->poll_index = add_poll_user( fd )) == -1) + { + release_object( fd ); +@@ -1742,11 +1755,15 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use + fd->completion = NULL; + fd->comp_flags = 0; + fd->no_fd_status = STATUS_BAD_DEVICE_TYPE; ++ fd->esync_fd = -1; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); ++ ++ if (do_esync()) ++ fd->esync_fd = esync_create_fd( 0, 0 ); + return fd; + } + +@@ -2172,6 +2189,9 @@ void set_fd_signaled( struct fd *fd, int 
signaled ) + if (fd->comp_flags & FILE_SKIP_SET_EVENT_ON_HANDLE) return; + fd->signaled = signaled; + if (signaled) wake_up( fd->user, 0 ); ++ ++ if (do_esync() && !signaled) ++ esync_clear( fd->esync_fd ); + } + + /* check if events are pending and if yes return which one(s) */ +@@ -2203,6 +2223,15 @@ int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ) + return ret; + } + ++int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct fd *fd = get_obj_fd( obj ); ++ int ret = fd->esync_fd; ++ *type = ESYNC_MANUAL_SERVER; ++ release_object( fd ); ++ return ret; ++} ++ + int default_fd_get_poll_events( struct fd *fd ) + { + int events = 0; +diff --git a/server/file.c b/server/file.c +index 23b6de275cd..49c4ba32b42 100644 +--- a/server/file.c ++++ b/server/file.c +@@ -111,6 +111,7 @@ static const struct object_ops file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + file_get_fd, /* get_fd */ +diff --git a/server/file.h b/server/file.h +index bcbdea5b8ce..cae0ac1e395 100644 +--- a/server/file.h ++++ b/server/file.h +@@ -102,6 +102,7 @@ extern void set_fd_signaled( struct fd *fd, int signaled ); + extern char *dup_fd_name( struct fd *root, const char *name ); + + extern int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ); ++extern int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ); + extern int default_fd_get_poll_events( struct fd *fd ); + extern void default_poll_event( struct fd *fd, int event ); + extern void fd_queue_async( struct fd *fd, struct async *async, int type ); +diff --git a/server/handle.c b/server/handle.c +index c1fb4a490a9..cb5628b7e06 100644 +--- a/server/handle.c ++++ b/server/handle.c +@@ -123,6 +123,7 @@ static const struct object_ops handle_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/hook.c b/server/hook.c +index 2a3da247313..61b5014c442 100644 +--- a/server/hook.c ++++ b/server/hook.c +@@ -81,6 +81,7 @@ static const struct object_ops hook_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/mailslot.c b/server/mailslot.c +index fb2a94cc7b8..18fef4b0466 100644 +--- a/server/mailslot.c ++++ b/server/mailslot.c +@@ -78,6 +78,7 @@ static const struct object_ops mailslot_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_get_fd, /* get_fd */ +@@ -136,6 +137,7 @@ static const struct object_ops mail_writer_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mail_writer_get_fd, /* get_fd */ +@@ -199,6 +201,7 @@ static const struct object_ops mailslot_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -229,6 +232,7 @@ static const struct object_ops mailslot_device_file_ops = + 
add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_device_file_get_fd, /* get_fd */ +diff --git a/server/main.c b/server/main.c +index 14ba74638ae..3e02cbb3832 100644 +--- a/server/main.c ++++ b/server/main.c +@@ -36,6 +36,7 @@ + #include "thread.h" + #include "request.h" + #include "unicode.h" ++#include "esync.h" + + /* command-line options */ + int debug_level = 0; +@@ -140,6 +141,9 @@ int main( int argc, char *argv[] ) + sock_init(); + open_master_socket(); + ++ if (do_esync()) ++ esync_init(); ++ + if (debug_level) fprintf( stderr, "wineserver: starting (pid=%ld)\n", (long) getpid() ); + set_current_time(); + init_scheduler(); +diff --git a/server/mapping.c b/server/mapping.c +index 1da05412b6a..10def3ca694 100644 +--- a/server/mapping.c ++++ b/server/mapping.c +@@ -68,6 +68,7 @@ static const struct object_ops ranges_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -104,6 +105,7 @@ static const struct object_ops shared_map_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -162,6 +164,7 @@ static const struct object_ops mapping_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mapping_get_fd, /* get_fd */ +diff --git a/server/mutex.c b/server/mutex.c +index b4f04ad307b..1235ab4731f 100644 +--- a/server/mutex.c ++++ b/server/mutex.c +@@ -61,6 +61,7 @@ static const struct object_ops mutex_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + mutex_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + mutex_satisfied, /* satisfied */ + mutex_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/named_pipe.c b/server/named_pipe.c +index c83e8c17027..e59a5b6c183 100644 +--- a/server/named_pipe.c ++++ b/server/named_pipe.c +@@ -118,6 +118,7 @@ static const struct object_ops named_pipe_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -162,6 +163,7 @@ static const struct object_ops pipe_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -205,6 +207,7 @@ static const struct object_ops pipe_client_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -252,6 +255,7 @@ static const struct object_ops named_pipe_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -283,6 +287,7 @@ static const struct object_ops named_pipe_device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* 
signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + named_pipe_device_file_get_fd, /* get_fd */ +diff --git a/server/object.h b/server/object.h +index 43f636b397b..5b6bb9cbfe1 100644 +--- a/server/object.h ++++ b/server/object.h +@@ -68,6 +68,8 @@ struct object_ops + void (*remove_queue)(struct object *,struct wait_queue_entry *); + /* is object signaled? */ + int (*signaled)(struct object *,struct wait_queue_entry *); ++ /* return the esync fd for this object */ ++ int (*get_esync_fd)(struct object *, enum esync_type *type); + /* wait satisfied */ + void (*satisfied)(struct object *,struct wait_queue_entry *); + /* signal an object */ +diff --git a/server/process.c b/server/process.c +index 1b6ddb1b982..df50955f621 100644 +--- a/server/process.c ++++ b/server/process.c +@@ -49,6 +49,7 @@ + #include "request.h" + #include "user.h" + #include "security.h" ++#include "esync.h" + + /* process object */ + +@@ -68,6 +69,7 @@ static struct security_descriptor *process_get_sd( struct object *obj ); + static void process_poll_event( struct fd *fd, int event ); + static struct list *process_get_kernel_obj_list( struct object *obj ); + static void process_destroy( struct object *obj ); ++static int process_get_esync_fd( struct object *obj, enum esync_type *type ); + static void terminate_process( struct process *process, struct thread *skip, int exit_code ); + + static const struct object_ops process_ops = +@@ -78,6 +80,7 @@ static const struct object_ops process_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + process_signaled, /* signaled */ ++ process_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -129,6 +132,7 @@ static const struct object_ops startup_info_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + startup_info_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -175,6 +179,7 @@ static const struct object_ops job_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + job_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -541,6 +546,7 @@ struct process *create_process( int fd, struct process *parent, int inherit_all, + process->rawinput_kbd = NULL; + memset( &process->image_info, 0, sizeof(process->image_info) ); + list_init( &process->rawinput_entry ); ++ process->esync_fd = -1; + list_init( &process->kernel_object ); + list_init( &process->thread_list ); + list_init( &process->locks ); +@@ -597,6 +603,9 @@ struct process *create_process( int fd, struct process *parent, int inherit_all, + if (!token_assign_label( process->token, security_high_label_sid )) + goto error; + ++ if (do_esync()) ++ process->esync_fd = esync_create_fd( 0, 0 ); ++ + set_fd_events( process->msg_fd, POLLIN ); /* start listening to events */ + return process; + +@@ -645,6 +654,7 @@ static void process_destroy( struct object *obj ) + if (process->token) release_object( process->token ); + free( process->dir_cache ); + free( process->image ); ++ if (do_esync()) close( process->esync_fd ); + } + + /* dump a process on stdout for debugging purposes */ +@@ -668,6 +678,13 @@ static int process_signaled( struct object *obj, struct wait_queue_entry *entry + return !process->running_threads; + } + ++static int process_get_esync_fd( struct object *obj, enum esync_type 
*type ) ++{ ++ struct process *process = (struct process *)obj; ++ *type = ESYNC_MANUAL_SERVER; ++ return process->esync_fd; ++} ++ + static unsigned int process_map_access( struct object *obj, unsigned int access ) + { + access = default_map_access( obj, access ); +diff --git a/server/process.h b/server/process.h +index 56092e5b1ac..eec69ddbcaf 100644 +--- a/server/process.h ++++ b/server/process.h +@@ -98,6 +98,7 @@ struct process + const struct rawinput_device *rawinput_kbd; /* rawinput keyboard device, if any */ + struct list kernel_object; /* list of kernel object pointers */ + pe_image_info_t image_info; /* main exe image info */ ++ int esync_fd; /* esync file descriptor (signaled on exit) */ + }; + + /* process functions */ +diff --git a/server/protocol.def b/server/protocol.def +index 45f22624d5d..2a8662354f6 100644 +--- a/server/protocol.def ++++ b/server/protocol.def +@@ -3713,3 +3713,56 @@ struct handle_info + @REQ(resume_process) + obj_handle_t handle; /* process handle */ + @END ++ ++enum esync_type ++{ ++ ESYNC_SEMAPHORE = 1, ++ ESYNC_AUTO_EVENT, ++ ESYNC_MANUAL_EVENT, ++ ESYNC_MUTEX, ++ ESYNC_AUTO_SERVER, ++ ESYNC_MANUAL_SERVER, ++ ESYNC_QUEUE, ++}; ++ ++/* Create a new eventfd-based synchronization object */ ++@REQ(create_esync) ++ unsigned int access; /* wanted access rights */ ++ int initval; /* initial value */ ++ int type; /* type of esync object */ ++ int max; /* maximum count on a semaphore */ ++ VARARG(objattr,object_attributes); /* object attributes */ ++@REPLY ++ obj_handle_t handle; /* handle to the object */ ++ int type; /* actual type (may be different for events) */ ++ unsigned int shm_idx; ++@END ++ ++@REQ(open_esync) ++ unsigned int access; /* wanted access rights */ ++ unsigned int attributes; /* object attributes */ ++ obj_handle_t rootdir; /* root directory */ ++ int type; /* type of esync object (above) */ ++ VARARG(name,unicode_str); /* object name */ ++@REPLY ++ obj_handle_t handle; /* handle to the event */ ++ int type; /* type of esync object (above) */ ++ unsigned int shm_idx; /* this object's index into the shm section */ ++@END ++ ++/* Retrieve the esync fd for an object. */ ++@REQ(get_esync_fd) ++ obj_handle_t handle; /* handle to the object */ ++@REPLY ++ int type; ++ unsigned int shm_idx; ++@END ++ ++/* Notify the server that we are doing a message wait or done with one. */ ++@REQ(esync_msgwait) ++ int in_msgwait; /* are we in a message wait? */ ++@END ++ ++/* Retrieve the fd to wait on for user APCs. 
*/ ++@REQ(get_esync_apc_fd) ++@END +diff --git a/server/queue.c b/server/queue.c +index 42b0f4e6bfd..a78748b96ca 100644 +--- a/server/queue.c ++++ b/server/queue.c +@@ -43,6 +43,7 @@ + #include "process.h" + #include "request.h" + #include "user.h" ++#include "esync.h" + + #define WM_NCMOUSEFIRST WM_NCMOUSEMOVE + #define WM_NCMOUSELAST (WM_NCMOUSEFIRST+(WM_MOUSELAST-WM_MOUSEFIRST)) +@@ -144,6 +145,8 @@ struct msg_queue + timeout_t last_get_msg; /* time of last get message call */ + int keystate_lock; /* owns an input keystate lock */ + const queue_shm_t *shared; /* queue in session shared memory */ ++ int esync_fd; /* esync file descriptor (signalled on message) */ ++ int esync_in_msgwait; /* our thread is currently waiting on us */ + }; + + struct hotkey +@@ -160,6 +163,7 @@ static void msg_queue_dump( struct object *obj, int verbose ); + static int msg_queue_add_queue( struct object *obj, struct wait_queue_entry *entry ); + static void msg_queue_remove_queue( struct object *obj, struct wait_queue_entry *entry ); + static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ); + static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ); + static void msg_queue_destroy( struct object *obj ); + static void msg_queue_poll_event( struct fd *fd, int event ); +@@ -175,6 +179,7 @@ static const struct object_ops msg_queue_ops = + msg_queue_add_queue, /* add_queue */ + msg_queue_remove_queue, /* remove_queue */ + msg_queue_signaled, /* signaled */ ++ msg_queue_get_esync_fd, /* get_esync_fd */ + msg_queue_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -212,6 +217,7 @@ static const struct object_ops thread_input_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -313,11 +319,16 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_ + queue->hooks = NULL; + queue->last_get_msg = current_time; + queue->keystate_lock = 0; ++ queue->esync_fd = -1; ++ queue->esync_in_msgwait = 0; + list_init( &queue->send_result ); + list_init( &queue->callback_result ); + list_init( &queue->pending_timers ); + list_init( &queue->expired_timers ); + for (i = 0; i < NB_MSG_KINDS; i++) list_init( &queue->msg_list[i] ); ++ ++ if (do_esync()) ++ queue->esync_fd = esync_create_fd( 0, 0 ); + + thread->queue = queue; + } +@@ -497,6 +508,9 @@ static inline void clear_queue_bits( struct msg_queue *queue, unsigned int bits + if (queue->keystate_lock) unlock_input_keystate( queue->input ); + queue->keystate_lock = 0; + } ++ ++ if (do_esync() && !is_signaled( queue )) ++ esync_clear( queue->esync_fd ); + } + + /* check whether msg is a keyboard message */ +@@ -955,6 +969,10 @@ static int is_queue_hung( struct msg_queue *queue ) + if (get_wait_queue_thread(entry)->queue == queue) + return 0; /* thread is waiting on queue -> not hung */ + } ++ ++ if (do_esync() && queue->esync_in_msgwait) ++ return 0; /* thread is waiting on queue in absentia -> not hung */ ++ + return 1; + } + +@@ -1010,6 +1028,13 @@ static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entr + return ret || is_signaled( queue ); + } + ++static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct msg_queue *queue = (struct msg_queue *)obj; ++ *type = ESYNC_QUEUE; ++ return 
queue->esync_fd; ++} ++ + static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct msg_queue *queue = (struct msg_queue *)obj; +@@ -2491,6 +2516,9 @@ DECL_HANDLER(get_queue_status) + shared->changed_bits &= ~req->clear_bits; + } + SHARED_WRITE_END; ++ ++ if (do_esync() && !is_signaled( queue )) ++ esync_clear( queue->esync_fd ); + } + else reply->wake_bits = reply->changed_bits = 0; + } +@@ -3493,3 +3521,20 @@ DECL_HANDLER(get_rawinput_devices) + devices[i++] = e->device; + } + } ++ ++DECL_HANDLER(esync_msgwait) ++{ ++ struct msg_queue *queue = get_current_queue(); ++ const queue_shm_t *queue_shm; ++ ++ if (!queue) return; ++ queue_shm = queue->shared; ++ queue->esync_in_msgwait = req->in_msgwait; ++ ++ if (current->process->idle_event && !(queue_shm->wake_mask & QS_SMRESULT)) ++ set_event( current->process->idle_event ); ++ ++ /* and start/stop waiting on the driver */ ++ if (queue->fd) ++ set_fd_events( queue->fd, req->in_msgwait ? POLLIN : 0 ); ++} +diff --git a/server/registry.c b/server/registry.c +index c00022ff63b..996bff5ef6d 100644 +--- a/server/registry.c ++++ b/server/registry.c +@@ -167,6 +167,7 @@ static const struct object_ops key_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/request.c b/server/request.c +index 97bf1a746d2..20b0ec309f3 100644 +--- a/server/request.c ++++ b/server/request.c +@@ -96,6 +96,7 @@ static const struct object_ops master_socket_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/semaphore.c b/server/semaphore.c +index 1a1f796f85b..d7d3a24e48f 100644 +--- a/server/semaphore.c ++++ b/server/semaphore.c +@@ -58,6 +58,7 @@ static const struct object_ops semaphore_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + semaphore_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + semaphore_satisfied, /* satisfied */ + semaphore_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/serial.c b/server/serial.c +index 30fe6e8380f..a50ace9903f 100644 +--- a/server/serial.c ++++ b/server/serial.c +@@ -92,6 +92,7 @@ static const struct object_ops serial_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + serial_get_fd, /* get_fd */ +diff --git a/server/signal.c b/server/signal.c +index 7c2bf2cc154..b6d6dcfc4b6 100644 +--- a/server/signal.c ++++ b/server/signal.c +@@ -67,6 +67,7 @@ static const struct object_ops handler_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/sock.c b/server/sock.c +index 07e1cf3a2ca..c2dfa8fb8ce 100644 +--- a/server/sock.c ++++ b/server/sock.c +@@ -172,6 +172,7 @@ static const struct object_ops sock_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + sock_get_fd, /* get_fd */ +@@ -1173,6 +1174,7 @@ static const struct object_ops ifchange_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue 
*/ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + ifchange_get_fd, /* get_fd */ +@@ -1393,6 +1395,7 @@ static const struct object_ops socket_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/symlink.c b/server/symlink.c +index 86ff09cd7e5..07f3c924f25 100644 +--- a/server/symlink.c ++++ b/server/symlink.c +@@ -60,6 +60,7 @@ static const struct object_ops symlink_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/thread.c b/server/thread.c +index 0e916e181bc..1a245c58396 100644 +--- a/server/thread.c ++++ b/server/thread.c +@@ -51,6 +51,7 @@ + #include "request.h" + #include "user.h" + #include "security.h" ++#include "esync.h" + + + /* thread queues */ +@@ -110,6 +111,7 @@ static const struct object_ops thread_apc_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + thread_apc_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -147,6 +149,7 @@ static const struct object_ops context_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + context_signaled, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -169,6 +172,7 @@ static const struct object_ops context_ops = + + static void dump_thread( struct object *obj, int verbose ); + static int thread_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static int thread_get_esync_fd( struct object *obj, enum esync_type *type ); + static unsigned int thread_map_access( struct object *obj, unsigned int access ); + static void thread_poll_event( struct fd *fd, int event ); + static struct list *thread_get_kernel_obj_list( struct object *obj ); +@@ -182,6 +186,7 @@ static const struct object_ops thread_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + thread_signaled, /* signaled */ ++ thread_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -221,6 +226,8 @@ static inline void init_thread_structure( struct thread *thread ) + thread->context = NULL; + thread->teb = 0; + thread->entry_point = 0; ++ thread->esync_fd = -1; ++ thread->esync_apc_fd = -1; + thread->system_regs = 0; + thread->queue = NULL; + thread->wait = NULL; +@@ -357,6 +364,12 @@ struct thread *create_thread( int fd, struct process *process, const struct secu + return NULL; + } + ++ if (do_esync()) ++ { ++ thread->esync_fd = esync_create_fd( 0, 0 ); ++ thread->esync_apc_fd = esync_create_fd( 0, 0 ); ++ } ++ + set_fd_events( thread->request_fd, POLLIN ); /* start listening to events */ + add_process_thread( thread->process, thread ); + return thread; +@@ -437,6 +450,9 @@ static void destroy_thread( struct object *obj ) + if (thread->exit_poll) remove_timeout_user( thread->exit_poll ); + if (thread->id) free_ptid( thread->id ); + if (thread->token) release_object( thread->token ); ++ ++ if (do_esync()) ++ close( thread->esync_fd ); + } + + /* dump a thread on stdout for debugging purposes */ +@@ -461,6 +477,13 @@ static int thread_signaled( struct object *obj, struct wait_queue_entry 
*entry ) + return mythread->state == TERMINATED && !mythread->exit_poll; + } + ++static int thread_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct thread *thread = (struct thread *)obj; ++ *type = ESYNC_MANUAL_SERVER; ++ return thread->esync_fd; ++} ++ + static unsigned int thread_map_access( struct object *obj, unsigned int access ) + { + access = default_map_access( obj, access ); +@@ -1045,6 +1068,9 @@ void wake_up( struct object *obj, int max ) + struct list *ptr; + int ret; + ++ if (do_esync()) ++ esync_wake_up( obj ); ++ + LIST_FOR_EACH( ptr, &obj->wait_queue ) + { + struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry ); +@@ -1129,8 +1155,13 @@ static int queue_apc( struct process *process, struct thread *thread, struct thr + grab_object( apc ); + list_add_tail( queue, &apc->entry ); + if (!list_prev( queue, &apc->entry )) /* first one */ ++ { + wake_thread( thread ); + ++ if (do_esync() && queue == &thread->user_apc) ++ esync_wake_fd( thread->esync_apc_fd ); ++ } ++ + return 1; + } + +@@ -1176,6 +1207,10 @@ static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system + apc = LIST_ENTRY( ptr, struct thread_apc, entry ); + list_remove( ptr ); + } ++ ++ if (do_esync() && list_empty( &thread->system_apc ) && list_empty( &thread->user_apc )) ++ esync_clear( thread->esync_apc_fd ); ++ + return apc; + } + +@@ -1292,6 +1327,8 @@ void kill_thread( struct thread *thread, int violent_death ) + } + kill_console_processes( thread, 0 ); + abandon_mutexes( thread ); ++ if (do_esync()) ++ esync_abandon_mutexes( thread ); + wake_up( &thread->obj, 0 ); + if (violent_death) send_thread_signal( thread, SIGQUIT ); + cleanup_thread( thread ); +diff --git a/server/thread.h b/server/thread.h +index 78ca4c201b2..0f6108b684a 100644 +--- a/server/thread.h ++++ b/server/thread.h +@@ -54,6 +54,8 @@ struct thread + struct process *process; + thread_id_t id; /* thread id */ + struct list mutex_list; /* list of currently owned mutexes */ ++ int esync_fd; /* esync file descriptor (signalled on exit) */ ++ int esync_apc_fd; /* esync apc fd (signalled when APCs are present) */ + unsigned int system_regs; /* which system regs have been set */ + struct msg_queue *queue; /* message queue */ + struct thread_wait *wait; /* current wait condition if sleeping */ +diff --git a/server/timer.c b/server/timer.c +index 9aba550fd93..dcbc9e2ece5 100644 +--- a/server/timer.c ++++ b/server/timer.c +@@ -36,6 +36,7 @@ + #include "file.h" + #include "handle.h" + #include "request.h" ++#include "esync.h" + + static const WCHAR timer_name[] = {'T','i','m','e','r'}; + +@@ -48,10 +49,12 @@ struct timer + struct thread *thread; /* thread that set the APC function */ + client_ptr_t callback; /* callback APC function */ + client_ptr_t arg; /* callback argument */ ++ int esync_fd; /* esync file descriptor */ + }; + + static void timer_dump( struct object *obj, int verbose ); + static int timer_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static int timer_get_esync_fd( struct object *obj, enum esync_type *type ); + static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ); + static void timer_destroy( struct object *obj ); + +@@ -65,6 +68,7 @@ static const struct object_ops timer_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + timer_signaled, /* signaled */ ++ timer_get_esync_fd, /* get_esync_fd */ + timer_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -99,6 +103,10 @@ 
static struct timer *create_timer( struct object *root, const struct unicode_str + timer->period = 0; + timer->timeout = NULL; + timer->thread = NULL; ++ timer->esync_fd = -1; ++ ++ if (do_esync()) ++ timer->esync_fd = esync_create_fd( 0, 0 ); + } + } + return timer; +@@ -172,6 +180,9 @@ static int set_timer( struct timer *timer, timeout_t expire, unsigned int period + { + period = 0; /* period doesn't make any sense for a manual timer */ + timer->signaled = 0; ++ ++ if (do_esync()) ++ esync_clear( timer->esync_fd ); + } + timer->when = (expire <= 0) ? expire - monotonic_time : max( expire, current_time ); + timer->period = period; +@@ -205,6 +216,13 @@ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry ) + return timer->signaled; + } + ++static int timer_get_esync_fd( struct object *obj, enum esync_type *type ) ++{ ++ struct timer *timer = (struct timer *)obj; ++ *type = timer->manual ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; ++ return timer->esync_fd; ++} ++ + static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct timer *timer = (struct timer *)obj; +diff --git a/server/token.c b/server/token.c +index 68cfcf234c1..0f128728b0f 100644 +--- a/server/token.c ++++ b/server/token.c +@@ -147,6 +147,7 @@ static const struct object_ops token_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/window.c b/server/window.c +index 24059aac0fe..a79fda5ad80 100644 +--- a/server/window.c ++++ b/server/window.c +@@ -108,6 +108,7 @@ static const struct object_ops window_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +diff --git a/server/winstation.c b/server/winstation.c +index 883722eff36..1a031248a7c 100644 +--- a/server/winstation.c ++++ b/server/winstation.c +@@ -64,6 +64,7 @@ static const struct object_ops winstation_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +90,7 @@ static const struct object_ops desktop_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +From 995fdfb8cdb0a8eed82e16640034dc9673ded681 Mon Sep 17 00:00:00 2001 +From: Paul Gofman +Date: Fri, 12 Mar 2021 23:58:39 +0300 +Subject: [PATCH 14/16] esync: Fix restoring the objects state on wait all + objects retry. + +--- + dlls/ntdll/unix/esync.c | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c +index 9a615fb277c..810477d02a0 100644 +--- a/dlls/ntdll/unix/esync.c ++++ b/dlls/ntdll/unix/esync.c +@@ -1168,10 +1168,22 @@ tryagain: + { + /* We were too slow. Put everything back. 
*/ + value = 1; +- for (j = i; j >= 0; j--) ++ for (j = i - 1; j >= 0; j--) + { +- if (write( obj->fd, &value, sizeof(value) ) == -1) ++ struct esync *obj = objs[j]; ++ ++ if (obj->type == ESYNC_MUTEX) ++ { ++ struct mutex *mutex = obj->shm; ++ ++ if (mutex->tid == GetCurrentThreadId()) ++ continue; ++ } ++ if (write( fds[j].fd, &value, sizeof(value) ) == -1) ++ { ++ ERR("write failed.\n"); + return errno_to_status( errno ); ++ } + } + + goto tryagain; /* break out of two loops and a switch */ +-- +2.30.2 + +From 69afcb164ccf8d3ecd5e94cf79c1e31698e14e5c Mon Sep 17 00:00:00 2001 +From: Derek Lesho +Date: Wed, 2 Feb 2022 17:02:44 -0500 +Subject: [PATCH] esync: Type-check HANDLE in esync_set_event. + +Signed-off-by: Derek Lesho +--- + dlls/ntdll/unix/esync.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c +index 55c5695964d..4663374653a 100644 +--- a/dlls/ntdll/unix/esync.c ++++ b/dlls/ntdll/unix/esync.c +@@ -526,6 +526,9 @@ NTSTATUS esync_set_event( HANDLE handle ) + if ((ret = get_object( handle, &obj ))) return ret; + event = obj->shm; + ++ if (obj->type != ESYNC_MANUAL_EVENT && obj->type != ESYNC_AUTO_EVENT) ++ return STATUS_OBJECT_TYPE_MISMATCH; ++ + if (obj->type == ESYNC_MANUAL_EVENT) + { + /* Acquire the spinlock. */ +diff --git a/server/object.c b/server/object.c +index b1665fb5372..0a4d1bede06 100644 +--- a/server/object.c ++++ b/server/object.c +@@ -108,6 +108,7 @@ static const struct object_ops apc_reserve_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -132,6 +133,7 @@ static const struct object_ops completion_reserve_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ ++ NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ diff --git a/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-mainline.patch b/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-mainline.patch index d7d3ad117..6739f8920 100644 --- a/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-mainline.patch +++ b/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-mainline.patch @@ -3057,11 +3057,10 @@ index e59a5b6c183..d0ea32a24d5 100644 no_signal, /* signal */ named_pipe_device_file_get_fd, /* get_fd */ @@ -344,6 +344,7 @@ - add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ NULL, /* get_esync_fd */ -+ NULL, /* get_fsync_idx */ ++ NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ named_pipe_dir_get_fd, /* get_fd */ diff --git a/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-staging.patch b/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-staging.patch index 84b030f98..45bc7c626 100644 --- a/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-staging.patch +++ b/wine-tkg-git/wine-tkg-patches/proton/fsync/fsync-unix-staging.patch @@ -3057,11 +3057,10 @@ index e59a5b6c183..d0ea32a24d5 100644 no_signal, /* signal */ named_pipe_device_file_get_fd, /* get_fd */ @@ -344,6 +344,7 @@ - add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ NULL, /* get_esync_fd */ -+ NULL, /* get_fsync_idx */ ++ NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ named_pipe_dir_get_fd, /* get_fd */
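
The eventfd pattern that the server hunks above rely on (esync_set_event() write()s to the fd to signal, esync_reset_event() read()s to consume, and clients wait on the fd handed out by the get_esync_fd request) can be exercised outside Wine. The standalone C sketch below mirrors those semantics; it assumes Linux eventfd(2), and the helper names in it are illustrative stand-ins, not functions from the patches.

/* Standalone sketch of the eventfd pattern used by the esync hunks above.
 * Helper names here are hypothetical; they are not Wine functions. */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* cf. esync_set_event(): signal by writing a nonzero 64-bit value */
static void set_event( int fd )
{
    static const uint64_t value = 1;
    if (write( fd, &value, sizeof(value) ) == -1) perror( "write" );
}

/* cf. esync_reset_event(): consume the counter; with a nonblocking fd,
 * EAGAIN just means the object was already unsignaled */
static void reset_event( int fd )
{
    uint64_t value;
    ssize_t ret = read( fd, &value, sizeof(value) );
    (void)ret; /* we don't care about the return value */
}

/* test the signaled state without consuming it: POLLIN means counter > 0 */
static int is_signaled( int fd )
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    return poll( &pfd, 1, 0 ) > 0 && (pfd.revents & POLLIN);
}

int main(void)
{
    /* cf. esync_create_fd( 0, 0 ): start unsignaled, nonblocking */
    int fd = eventfd( 0, EFD_CLOEXEC | EFD_NONBLOCK );
    if (fd == -1) { perror( "eventfd" ); return 1; }

    printf( "signaled: %d\n", is_signaled( fd ) ); /* 0 */
    set_event( fd );
    printf( "signaled: %d\n", is_signaled( fd ) ); /* 1 */
    reset_event( fd );
    printf( "signaled: %d\n", is_signaled( fd ) ); /* 0 */
    close( fd );
    return 0;
}

Run standalone, this prints 0, 1, 0: the counter set by write() stays nonzero across poll() checks until a read() consumes it, which matches why esync_reset_event() in the hunks above clears a signaled event by reading the fd rather than by any server-side bookkeeping.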