2 * winhandl.c: Module to give Windows front ends the general
3 * ability to deal with consoles, pipes, serial ports, or any other
4 * type of data stream accessed through a Windows API HANDLE rather
5 * than a WinSock SOCKET.
7 * We do this by spawning a subthread to continuously try to read
8 * from the handle. Every time a read successfully returns some
9 * data, the subthread sets an event object which is picked up by
10 * the main thread, and the main thread then sets an event in
11 * return to instruct the subthread to resume reading.
13 * Output works precisely the other way round, in a second
14 * subthread. The output subthread should not be attempting to
15 * write all the time, because it hasn't always got data _to_
16 * write; so the output thread waits for an event object notifying
17 * it to _attempt_ a write, and then it sets an event in return
20 * (It's terribly annoying having to spawn a subthread for each
21 * direction of each handle. Technically it isn't necessary for
22 * serial ports, since we could use overlapped I/O within the main
23 * thread and wait directly on the event objects in the OVERLAPPED
24 * structures. However, we can't use this trick for some types of
25 * file handle at all - for some reason Windows restricts use of
26 * OVERLAPPED to files which were opened with the overlapped flag -
27 * and so we must use threads for those. This being the case, it's
28 * simplest just to use threads for everything rather than trying
29 * to keep track of multiple completely separate mechanisms.)
/* ----------------------------------------------------------------------
 * Generic definitions.
 */

/*
 * Maximum amount of backlog we will allow to build up on an input
 * handle before we stop reading from it. handle_throttle() compares
 * the client-reported backlog against this before permitting the
 * input subthread to read again.
 */
#define MAX_BACKLOG 32768
struct handle_generic {
    /*
     * Initial fields common to both handle_input and handle_output,
     * laid out identically in each so that a struct handle's union
     * can be inspected through `u.g' regardless of direction.
     *
     * The three HANDLEs are set up at initialisation time and are
     * thereafter read-only to both main thread and subthread.
     * `moribund' is only used by the main thread; `done' is
     * written by the main thread before signalling to the
     * subthread. `defunct' and `busy' are used only by the main
     * thread.
     */
    HANDLE h;                          /* the handle itself */
    HANDLE ev_to_main;                 /* event used to signal main thread */
    HANDLE ev_from_main;               /* event used to signal back to us */
    int moribund;                      /* are we going to kill this soon? */
    int done;                          /* request subthread to terminate */
    int defunct;                       /* has the subthread already gone? */
    int busy;                          /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */
/* ----------------------------------------------------------------------
 * Data required by an input thread.
 */
    /*
     * Copy of the handle_generic structure: these fields mirror
     * handle_generic member-for-member, so they can be read through
     * `u.g' in struct handle's union.
     */
    HANDLE h;                          /* the handle itself */
    HANDLE ev_to_main;                 /* event used to signal main thread */
    HANDLE ev_from_main;               /* event used to signal back to us */
    int moribund;                      /* are we going to kill this soon? */
    int done;                          /* request subthread to terminate */
    int defunct;                       /* has the subthread already gone? */
    int busy;                          /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */

    /*
     * Data set at initialisation and then read-only.
     */

    /*
     * Data set by the input thread before signalling ev_to_main,
     * and read by the main thread after receiving that signal.
     */
    char buffer[4096];                 /* the data read from the handle */
    DWORD len;                         /* how much data that was */
    int readret;                       /* lets us know about read errors */

    /*
     * Callback function called by this module when data arrives on
     * an input handle.
     */
    handle_inputfn_t gotdata;
/*
 * The actual thread procedure for an input thread: repeatedly reads
 * from ctx->h into ctx->buffer, signals ev_to_main so the main
 * thread can collect the data, then waits on ev_from_main for
 * permission to reuse the buffer.
 */
static DWORD WINAPI handle_input_threadfunc(void *param)
    struct handle_input *ctx = (struct handle_input *) param;
    OVERLAPPED ovl, *povl;

    /*
     * Handles opened for overlapped I/O get a manual-reset event for
     * the OVERLAPPED structure to signal read completion on.
     */
    if (ctx->flags & HANDLE_FLAG_OVERLAPPED) {
        oev = CreateEvent(NULL, TRUE, FALSE, NULL);

    /* Unit-buffer mode presumably restricts the read size (the
     * branch body is on lines not visible here — TODO confirm);
     * otherwise we read up to a full buffer at a time. */
    if (ctx->flags & HANDLE_FLAG_UNITBUFFER)
        readlen = sizeof(ctx->buffer);

        memset(povl, 0, sizeof(OVERLAPPED));

        ctx->readret = ReadFile(ctx->h, ctx->buffer, readlen,
        /*
         * If the overlapped read went asynchronous, block until it
         * completes and collect the final success flag and length.
         */
        if (povl && !ctx->readret && GetLastError() == ERROR_IO_PENDING) {
            WaitForSingleObject(povl->hEvent, INFINITE);
            ctx->readret = GetOverlappedResult(ctx->h, povl, &ctx->len, FALSE);

        /*
         * A successful zero-length read is EOF; when the client asked
         * for HANDLE_FLAG_IGNOREEOF it is swallowed rather than
         * reported.
         */
        if (ctx->readret && ctx->len == 0 &&
            (ctx->flags & HANDLE_FLAG_IGNOREEOF))

        /* Hand the result to the main thread... */
        SetEvent(ctx->ev_to_main);
        /* ...and wait until it tells us the buffer may be reused. */
        WaitForSingleObject(ctx->ev_from_main, INFINITE);
            break;                     /* main thread told us to shut down */
/*
 * This is called after a successful read, or from the
 * `unthrottle' function. It decides whether or not to begin a new
 * read operation, based on how much unconsumed backlog the client
 * currently holds.
 */
static void handle_throttle(struct handle_input *ctx, int backlog)
    /*
     * If there's a read operation already in progress, do nothing:
     * when that completes, we'll come back here and be in a
     * position to make a better decision.
     */

    /*
     * Otherwise, we must decide whether to start a new read based
     * on the size of the backlog.
     */
    if (backlog < MAX_BACKLOG) {
        /* Backlog is acceptable: wake the subthread to read again. */
        SetEvent(ctx->ev_from_main);
/* ----------------------------------------------------------------------
 * Data required by an output thread.
 */
struct handle_output {
    /*
     * Copy of the handle_generic structure: these fields mirror
     * handle_generic member-for-member, so they can be read through
     * `u.g' in struct handle's union.
     */
    HANDLE h;                          /* the handle itself */
    HANDLE ev_to_main;                 /* event used to signal main thread */
    HANDLE ev_from_main;               /* event used to signal back to us */
    int moribund;                      /* are we going to kill this soon? */
    int done;                          /* request subthread to terminate */
    int defunct;                       /* has the subthread already gone? */
    int busy;                          /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */

    /*
     * Data set at initialisation and then read-only.
     */

    /*
     * Data set by the main thread before signalling ev_from_main,
     * and read by the input thread after receiving that signal.
     */
    char *buffer;                      /* the data to write */
    DWORD len;                         /* how much data there is */

    /*
     * Data set by the input thread before signalling ev_to_main,
     * and read by the main thread after receiving that signal.
     */
    DWORD lenwritten;                  /* how much data we actually wrote */
    int writeret;                      /* return value from WriteFile */

    /*
     * Data only ever read or written by the main thread.
     */
    bufchain queued_data;              /* data still waiting to be written */

    /*
     * Callback function called when the backlog in the bufchain
     * shrinks, i.e. whenever a write completes (see handle_got_event).
     */
    handle_outputfn_t sentdata;
/*
 * The actual thread procedure for an output thread: waits until the
 * main thread hands it data via ev_from_main, writes that data to
 * ctx->h, and reports the result back through ev_to_main.
 */
static DWORD WINAPI handle_output_threadfunc(void *param)
    struct handle_output *ctx = (struct handle_output *) param;
    OVERLAPPED ovl, *povl;

    if (ctx->flags & HANDLE_FLAG_OVERLAPPED)

        /* Sleep until the main thread asks us to attempt a write. */
        WaitForSingleObject(ctx->ev_from_main, INFINITE);
            SetEvent(ctx->ev_to_main);

            memset(povl, 0, sizeof(OVERLAPPED));
        ctx->writeret = WriteFile(ctx->h, ctx->buffer, ctx->len,
                                  &ctx->lenwritten, povl);
        /*
         * If the overlapped write went asynchronous, block (bWait =
         * TRUE) until it finishes so writeret/lenwritten hold the
         * final outcome before we report back.
         */
        if (povl && !ctx->writeret && GetLastError() == ERROR_IO_PENDING)
            ctx->writeret = GetOverlappedResult(ctx->h, povl,
                                                &ctx->lenwritten, TRUE);

        /* Tell the main thread the write has completed. */
        SetEvent(ctx->ev_to_main);
/*
 * Main-thread helper: if the output subthread is idle and there is
 * queued data, point the subthread at the first contiguous chunk of
 * the bufchain and wake it to perform a write.
 */
static void handle_try_output(struct handle_output *ctx)
    /* Only start a write if no operation is in flight and we have data. */
    if (!ctx->busy && bufchain_size(&ctx->queued_data)) {
        bufchain_prefix(&ctx->queued_data, &senddata, &sendlen);
        ctx->buffer = senddata;
        /* Wake the output subthread to write ctx->buffer. */
        SetEvent(ctx->ev_from_main);
/* ----------------------------------------------------------------------
 * Unified code handling both input and output threads.
 */
    struct handle_generic g;           /* common prefix of both variants */
    struct handle_input i;             /* input-direction state */
    struct handle_output o;            /* output-direction state */

/*
 * Tree of all active handles, ordered by their ev_to_main event so
 * that handle_got_event() can look up the handle whose event fired.
 */
static tree234 *handles_by_evtomain;
/*
 * Sorting callback for handles_by_evtomain: orders two struct handles
 * by the numeric value of their ev_to_main event HANDLEs.
 *
 * NOTE(review): casting a HANDLE to `unsigned' truncates it to 32
 * bits on 64-bit Windows, where HANDLE is pointer-sized; two distinct
 * handles could then compare equal. Casting via uintptr_t/ULONG_PTR
 * would be safe on both widths — worth fixing when the full file is
 * in view.
 */
static int handle_cmp_evtomain(void *av, void *bv)
    struct handle *a = (struct handle *)av;
    struct handle *b = (struct handle *)bv;

    if ((unsigned)a->u.g.ev_to_main < (unsigned)b->u.g.ev_to_main)
    else if ((unsigned)a->u.g.ev_to_main > (unsigned)b->u.g.ev_to_main)
/*
 * Search callback for find234: compares a bare event HANDLE (the one
 * that fired) against a struct handle in the tree, using the same
 * ordering as handle_cmp_evtomain.
 *
 * NOTE(review): same 32-bit truncation caveat as handle_cmp_evtomain —
 * the `unsigned' casts lose the upper half of a 64-bit HANDLE.
 */
static int handle_find_evtomain(void *av, void *bv)
    HANDLE *a = (HANDLE *)av;
    struct handle *b = (struct handle *)bv;

    if ((unsigned)*a < (unsigned)b->u.g.ev_to_main)
    else if ((unsigned)*a > (unsigned)b->u.g.ev_to_main)
/*
 * Wrap an existing Windows HANDLE for input: allocate the struct
 * handle, create its signalling events, register it in
 * handles_by_evtomain, and spawn the input subthread. `gotdata' is
 * called when data arrives; `privdata' is stored for the client
 * (see handle_get_privdata); `flags' is a HANDLE_FLAG_* bitmask.
 */
struct handle *handle_input_new(HANDLE handle, handle_inputfn_t gotdata,
                                void *privdata, int flags)
    struct handle *h = snew(struct handle);

    /* Both events are auto-reset and initially unsignalled. */
    h->u.i.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL);
    h->u.i.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL);
    h->u.i.gotdata = gotdata;
    h->u.i.defunct = FALSE;
    h->u.i.moribund = FALSE;
    h->u.i.privdata = privdata;
    h->u.i.flags = flags;

    /* Lazily create the lookup tree on first use. */
    if (!handles_by_evtomain)
        handles_by_evtomain = newtree234(handle_cmp_evtomain);
    add234(handles_by_evtomain, h);

    /* Start the reader subthread (handle_input_threadfunc). */
    CreateThread(NULL, 0, handle_input_threadfunc,
/*
 * Wrap an existing Windows HANDLE for output: allocate the struct
 * handle, create its signalling events, initialise the queue of
 * pending output data, register it in handles_by_evtomain, and spawn
 * the output subthread. `sentdata' is called as writes complete;
 * `privdata' is stored for the client; `flags' is a HANDLE_FLAG_*
 * bitmask.
 */
struct handle *handle_output_new(HANDLE handle, handle_outputfn_t sentdata,
                                 void *privdata, int flags)
    struct handle *h = snew(struct handle);

    /* Both events are auto-reset and initially unsignalled. */
    h->u.o.ev_to_main = CreateEvent(NULL, FALSE, FALSE, NULL);
    h->u.o.ev_from_main = CreateEvent(NULL, FALSE, FALSE, NULL);
    h->u.o.defunct = FALSE;
    h->u.o.moribund = FALSE;
    h->u.o.privdata = privdata;
    bufchain_init(&h->u.o.queued_data);
    h->u.o.sentdata = sentdata;
    h->u.o.flags = flags;

    /* Lazily create the lookup tree on first use. */
    if (!handles_by_evtomain)
        handles_by_evtomain = newtree234(handle_cmp_evtomain);
    add234(handles_by_evtomain, h);

    /* Start the writer subthread (handle_output_threadfunc). */
    CreateThread(NULL, 0, handle_output_threadfunc,
/*
 * Queue `len' bytes for output on a handle created with
 * handle_output_new(). Starts a write immediately if the subthread
 * is idle, and returns the total number of bytes still buffered, so
 * the caller can apply its own backpressure.
 */
int handle_write(struct handle *h, const void *data, int len)
    bufchain_add(&h->u.o.queued_data, data, len);
    handle_try_output(&h->u.o);
    return bufchain_size(&h->u.o.queued_data);
/*
 * Build an array of the ev_to_main events for all handles currently
 * engaged in useful activity, for the front end to include in its
 * wait loop; *nevents receives the count. The array is grown with
 * sresize(), so it is heap-allocated — presumably the caller frees
 * it (TODO confirm against the caller).
 */
HANDLE *handle_get_events(int *nevents)
    /*
     * Go through our tree counting the handle objects currently
     * engaged in useful activity.
     */
    if (handles_by_evtomain) {
        for (i = 0; (h = index234(handles_by_evtomain, i)) != NULL; i++) {
                ret = sresize(ret, size, HANDLE);
            ret[n++] = h->u.g.ev_to_main;
/*
 * Actually dispose of a handle: discard any queued output data,
 * close both signalling events, and remove the handle from the
 * lookup tree. Only called once the subthread can no longer touch
 * the structure (see handle_free / handle_got_event).
 */
static void handle_destroy(struct handle *h)
        bufchain_clear(&h->u.o.queued_data);
    CloseHandle(h->u.g.ev_from_main);
    CloseHandle(h->u.g.ev_to_main);
    del234(handles_by_evtomain, h);
/*
 * Client-facing destructor. Destruction may have to be deferred
 * (via the moribund flag) until the subthread has finished with the
 * memory; the deferred cases are completed in handle_got_event().
 */
void handle_free(struct handle *h)
    /*
     * If the handle is currently busy, we cannot immediately free
     * it. Instead we must wait until it's finished its current
     * operation, because otherwise the subthread will write to
     * invalid memory after we free its context from under it.
     */
    assert(h && !h->u.g.moribund);
        /*
         * Just set the moribund flag, which will be noticed next
         * time an operation completes.
         */
        h->u.g.moribund = TRUE;
    } else if (h->u.g.defunct) {
        /*
         * There isn't even a subthread; we can go straight to
         * destroying the handle.
         */
        /*
         * The subthread is alive but not busy, so we now signal it
         * to die. Set the moribund flag to indicate that it will
         * want destroying after that.
         */
        h->u.g.moribund = TRUE;
        /* Wake the subthread so it notices the termination request. */
        SetEvent(h->u.g.ev_from_main);
/*
 * Called by the front end when one of the events returned by
 * handle_get_events() is signalled: look up the owning handle and
 * process the completed input or output operation.
 */
void handle_got_event(HANDLE event)
    assert(handles_by_evtomain);
    h = find234(handles_by_evtomain, &event, handle_find_evtomain);
        /*
         * This isn't an error condition. If two or more event
         * objects were signalled during the same select operation,
         * and processing of the first caused the second handle to
         * be closed, then it will sometimes happen that we receive
         * an event notification here for a handle which is already
         * deceased. In that situation we simply do nothing.
         */

    if (h->u.g.moribund) {
        /*
         * A moribund handle is already treated as dead from the
         * external user's point of view, so do nothing with the
         * actual event. Just signal the thread to die if
         * necessary, or destroy the handle if not.
         */
            SetEvent(h->u.g.ev_from_main);

        /*
         * A signal on an input handle means data has arrived.
         */
        if (h->u.i.len == 0) {
            /*
             * EOF, or (nearly equivalently) read error.
             */
            h->u.i.gotdata(h, NULL, (h->u.i.readret ? 0 : -1));
            h->u.i.defunct = TRUE;
            /* Deliver the data; the callback's return value is the
             * client's backlog, which drives the throttle decision. */
            backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len);
            handle_throttle(&h->u.i, backlog);

        /*
         * A signal on an output handle means we have completed a
         * write. Call the callback to indicate that the output
         * buffer size has decreased, or to indicate an error.
         */
        if (!h->u.o.writeret) {
            /*
             * Write error. Send a negative value to the callback,
             * and mark the thread as defunct (because the output
             * thread is terminating by now).
             */
            h->u.o.sentdata(h, -1);
            h->u.o.defunct = TRUE;
            /* Successful write: drop the written bytes, report the
             * new backlog, and start the next write if data remains. */
            bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten);
            h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data));
            handle_try_output(&h->u.o);
/*
 * Client call made after it has consumed some backlog: re-run the
 * throttle decision with the new backlog figure, possibly restarting
 * reads on the input handle.
 */
void handle_unthrottle(struct handle *h, int backlog)
    handle_throttle(&h->u.i, backlog);
/*
 * Return the number of bytes still queued for output on a handle.
 */
int handle_backlog(struct handle *h)
    return bufchain_size(&h->u.o.queued_data);
/*
 * Retrieve the privdata pointer the client supplied when the handle
 * was created.
 */
void *handle_get_privdata(struct handle *h)
    return h->u.g.privdata;