1
0
mirror of https://git.tartarus.org/simon/putty.git synced 2025-01-09 17:38:00 +00:00
putty-source/windows/handle-wait.c

144 lines
4.2 KiB
C
Raw Normal View History

Reorganise Windows HANDLE management. Before commit 6e69223dc262755, Pageant would stop working after a certain number of PuTTYs were active at the same time. (At most about 60, but maybe fewer - see below.) This was because of two separate bugs. The easy one, fixed in 6e69223dc262755 itself, was that PuTTY left each named-pipe connection to Pageant open for the rest of its lifetime. So the real problem was that Pageant had too many active connections at once. (And since a given PuTTY might make multiple connections during userauth - one to list keys, and maybe another to actually make a signature - that was why the number of _PuTTYs_ might vary.) It was clearly a bug that PuTTY was leaving connections to Pageant needlessly open. But it was _also_ a bug that Pageant couldn't handle more than about 60 at once. In this commit, I fix that secondary bug. The cause of the bug is that the WaitForMultipleObjects function family in the Windows API have a limit on the number of HANDLE objects they can select between. The limit is MAXIMUM_WAIT_OBJECTS, defined to be 64. And handle-io.c was using a separate event object for each I/O subthread to communicate back to the main thread, so as soon as all those event objects (plus a handful of other HANDLEs) added up to more than 64, we'd start passing an overlarge handle array to WaitForMultipleObjects, and it would start not doing what we wanted. To fix this, I've reorganised handle-io.c so that all its subthreads share just _one_ event object to signal readiness back to the main thread. There's now a linked list of 'struct handle' objects that are ready to be processed, protected by a CRITICAL_SECTION. Each subthread signals readiness by adding itself to the linked list, and setting the event object to indicate that the list is now non-empty. When the main thread receives the event, it iterates over the whole list processing all the ready handles. 
(Each 'struct handle' still has a separate event object for the main thread to use to communicate _to_ the subthread. That's OK, because no thread is ever waiting on all those events at once: each subthread only waits on its own.) The previous HT_FOREIGN system didn't really fit into this framework. So I've moved it out into its own system. There's now a handle-wait.c which deals with the relatively simple job of managing a list of handles that need to be waited for, each with a callback function; that's what communicates a list of HANDLEs to event loops, and receives the notification when the event loop notices that one of them has done something. And handle-io.c is now just one client of handle-wait.c, providing a single HANDLE to the event loop, and dealing internally with everything that needs to be done when that handle fires. The new top-level handle-wait.c system *still* can't deal with more than MAXIMUM_WAIT_OBJECTS. At the moment, I'm reasonably convinced it doesn't need to: the only kind of HANDLE that any of our tools could previously have needed to wait on more than one of was the one in handle-io.c that I've just removed. But I've left some assertions and a TODO comment in there just in case we need to change that in future.
2021-05-24 12:06:10 +00:00
/*
* handle-wait.c: Manage a collection of HANDLEs to wait for (in a
* WaitFor{Single,Multiple}Objects sense), each with a callback to be
* called when it's activated. Tracks the list, and provides an API to
* event loops that let them get a list of things to wait for and a
* way to call back to here when one of them does something.
*/
/*
* TODO: currently this system can't cope with more than
* MAXIMUM_WAIT_OBJECTS (= 64) handles at a time. It enforces that by
* assertion, so we'll at least find out if that assumption is ever
* violated.
*
* It should be OK for the moment. As of 2021-05-24, the only uses of
* this system are by the ConPTY backend (just once, to watch for its
* subprocess terminating); by Pageant (for the event that the
* WM_COPYDATA subthread uses to signal the main thread); and by
* named-pipe-server.c (once per named-pipe server, of which there is
* one in Pageant and one in connection-sharing upstreams). So the
* total number of handles has a pretty small upper bound.
*
* But sooner or later, I'm sure we'll find a reason why we really
* need to watch a squillion handles at once. When that happens, I
* can't see any alternative to setting up some kind of tree of
* subthreads in this module, each one condensing 64 of our handles
* into one, by doing its own WaitForMultipleObjects and setting an
* event object to indicate that one of them did something. It'll be
* horribly ugly.
*/
#include "putty.h"
/*
 * One registered wait object: a Windows HANDLE together with the
 * callback to invoke when an event loop reports that the handle has
 * become signalled.
 */
struct HandleWait {
    HANDLE handle;                      /* the HANDLE passed to WaitFor*Objects */
    handle_wait_callback_fn_t callback; /* run when 'handle' is activated */
    void *callback_ctx;                 /* opaque context handed to 'callback' */

    int index; /* sort key for tree234; lowest free value, see allocate_index */
};
/*
 * Private wrapper around the public HandleWaitList handed to event
 * loops. 'hws' runs parallel to hwl.handles, so that when the event
 * loop tells us hwl.handles[i] fired, handle_wait_activate can look
 * up hws[i] to find the callback to run.
 *
 * (A previous revision also kept a second, never-read
 * 'HANDLE handles[MAXIMUM_WAIT_OBJECTS]' array here; all code fills
 * in hwl.handles instead, so the duplicate has been dropped.)
 */
struct HandleWaitListInner {
    HandleWait *hws[MAXIMUM_WAIT_OBJECTS]; /* parallel to hwl.handles */

    struct HandleWaitList hwl; /* the part exposed via get_handle_wait_list */
};
/*
 * Ordering function for the tree234 of HandleWaits: sorts by the
 * 'index' field, which is unique per HandleWait.
 */
static int handlewait_cmp(void *av, void *bv)
{
    const HandleWait *a = (const HandleWait *)av;
    const HandleWait *b = (const HandleWait *)bv;

    /* Branchless three-way comparison: -1, 0 or +1. */
    return (a->index > b->index) - (a->index < b->index);
}
static tree234 *handlewaits_tree_real;
static inline tree234 *ensure_handlewaits_tree_exists(void)
{
if (!handlewaits_tree_real)
handlewaits_tree_real = newtree234(handlewait_cmp);
return handlewaits_tree_real;
}
/*
 * Choose the smallest nonnegative integer not currently in use as the
 * 'index' of any HandleWait in the tree, so indices stay densely
 * packed near zero.
 *
 * This is a binary search over the sorted tree using search234: at
 * each element we compare its 'index' field against its position
 * st->index in the tree. If index == position, everything to the left
 * is densely packed, so the first gap (if any) is to the right; if
 * index > position, some smaller value is missing, so a gap lies to
 * the left. When the search runs out of elements, st->index is the
 * position of the first gap, i.e. the answer.
 */
static int allocate_index(void)
{
    tree234 *t = ensure_handlewaits_tree_exists();
    search234_state st[1];

    search234_start(st, t);
    while (st->element) {
        HandleWait *hw = (HandleWait *)st->element;
        if (st->index < hw->index) {
            /* There are unused index slots to the left of this element */
            search234_step(st, -1);
        } else {
            /* Indices can never be less than their tree position,
             * because they're unique and sorted */
            assert(st->index == hw->index);
            search234_step(st, +1);
        }
    }

    return st->index;
}
/*
 * Register a HANDLE to be waited for, together with a callback (and
 * its context pointer) to run when an event loop reports the handle
 * as signalled. Returns the HandleWait, which the caller passes to
 * delete_handle_wait to unregister it.
 */
HandleWait *add_handle_wait(HANDLE h, handle_wait_callback_fn_t callback,
                            void *callback_ctx)
{
    tree234 *tree = ensure_handlewaits_tree_exists();

    HandleWait *hw = snew(HandleWait);
    hw->handle = h;
    hw->callback = callback;
    hw->callback_ctx = callback_ctx;
    hw->index = allocate_index(); /* guaranteed not to collide */

    HandleWait *inserted = add234(tree, hw);
    assert(inserted == hw); /* index uniqueness means no duplicate */

    return hw;
}
/*
 * Unregister a HandleWait previously returned by add_handle_wait,
 * freeing it and releasing its index for reuse.
 */
void delete_handle_wait(HandleWait *hw)
{
    HandleWait *removed = del234(ensure_handlewaits_tree_exists(), hw);
    assert(removed == hw); /* it must have been in the tree */
    sfree(hw);
}
/*
 * Take a snapshot of all currently registered handles, for an event
 * loop to pass to WaitFor{Single,Multiple}Objects. The caller owns
 * the returned list and must release it via handle_wait_list_free.
 */
HandleWaitList *get_handle_wait_list(void)
{
    struct HandleWaitListInner *hwli = snew(struct HandleWaitListInner);
    tree234 *tree = ensure_handlewaits_tree_exists();

    size_t count = 0;
    for (int pos = 0;; pos++) {
        HandleWait *hw = index234(tree, pos);
        if (hw == NULL)
            break;
        /* See the TODO above: we can't yet cope with more handles
         * than WaitForMultipleObjects can accept. */
        assert(count < MAXIMUM_WAIT_OBJECTS);
        hwli->hws[count] = hw;
        hwli->hwl.handles[count] = hw->handle;
        count++;
    }
    hwli->hwl.nhandles = count;

    return &hwli->hwl;
}
/*
 * Called by an event loop when hwl->handles[index] has been reported
 * as signalled: dispatch to the corresponding HandleWait's callback.
 */
void handle_wait_activate(HandleWaitList *hwl, int index)
{
    struct HandleWaitListInner *hwli =
        container_of(hwl, struct HandleWaitListInner, hwl);

    assert(0 <= index);
    assert(index < hwli->hwl.nhandles);

    HandleWait *hw = hwli->hws[index];
    hw->callback(hw->callback_ctx);
}
/*
 * Free a list previously returned by get_handle_wait_list. (The
 * HandleWaits themselves are owned by the tree, not the list.)
 */
void handle_wait_list_free(HandleWaitList *hwl)
{
    sfree(container_of(hwl, struct HandleWaitListInner, hwl));
}