mirror of
https://git.tartarus.org/simon/putty.git
synced 2025-07-14 01:27:35 -05:00
Support OpenSSH delayed compression without a rekey.
The problem with OpenSSH delayed compression is that the spec has a race condition. Compression is enabled when the server sends USERAUTH_SUCCESS. In the server->client direction, that's fine: the USERAUTH_SUCCESS packet is not itself compressed, and the next packet in the same direction is. But in the client->server direction, this specification relies on there being a moment of half-duplex in the connection: the client can't send any outgoing packet _after_ whatever userauth packet the USERAUTH_SUCCESS was a response to, and _before_ finding out whether the response is USERAUTH_SUCCESS or something else. If it emitted, say, an SSH_MSG_IGNORE or initiated a rekey (perhaps due to a timeout), then that might cross in the network with USERAUTH_SUCCESS and the server wouldn't be able to know whether to treat it as compressed. My previous solution was to note the presence of delayed compression options in the server KEXINIT, but not to negotiate them in the initial key exchange. Instead, we conduct the userauth exchange with compression="none", and then once userauth has concluded, we trigger an immediate rekey in which we do accept delayed compression methods - because of course by that time they're no different from the non-delayed versions. And that means compression is enabled by the bidirectional NEWKEYS exchange, which lacks that race condition. I think OpenSSH itself gets away with this because its layer structure is structured so as to never send any such asynchronous transport-layer message in the middle of userauth. Ours is not. But my cunning plan is that now that my BPP abstraction includes a queue of packets to be sent and a callback that processes that queue on to the output raw data bufchain, it's possible to make that callback terminate early, to leave any dangerous transport-layer messages unsent while we wait for a userauth response.
Specifically: if we've negotiated a delayed compression method and not yet seen USERAUTH_SUCCESS, then ssh2_bpp_handle_output will emit all packets from its queue up to and including the last one in the userauth type-code range, and keep back any further ones. The idea is that _if_ that last userauth message was one that might provoke USERAUTH_SUCCESS, we don't want to send any difficult things after it; if it's not (e.g. it's in the middle of some ongoing userauth process like k-i or GSS) then the userauth layer will know that, and will emit some further userauth packet on its own initiative which will clue us in that it's OK to release everything up to and including that one. (So in particular it wasn't even necessary to forbid _all_ transport-layer packets during userauth. I could have done that by reordering the output queue - packets in that queue haven't been assigned their sequence numbers yet, so that would have been safe - but it's more elegant not to have to.) One particular case we do have to be careful about is not trying to initiate a _rekey_ during userauth, if delayed compression is in the offing. That's because when we start rekeying, ssh2transport stops sending any higher-layer packets at all, to discourage servers from trying to ignore the KEXINIT and press on regardless - you don't get your higher-layer replies until you actually respond to the lower-layer interrupt. But in this case, if ssh2transport sent a KEXINIT, which ssh2bpp kept back in the queue to avoid a delayed compression race and would only send if another userauth packet followed it, which ssh2transport would never pass on to ssh2bpp's output queue, there'd be a complete protocol deadlock. So instead I defer any attempt to start a rekey until after userauth finishes (using the existing system for starting a deferred rekey at that moment, which was previously used for the _old_ delayed-compression strategy, and still has to be here anyway for GSSAPI purposes).
This commit is contained in:
176
ssh2bpp.c
176
ssh2bpp.c
@ -14,6 +14,7 @@ struct ssh2_bpp_direction {
|
||||
ssh2_cipher *cipher;
|
||||
ssh2_mac *mac;
|
||||
int etm_mode;
|
||||
const struct ssh_compression_alg *pending_compression;
|
||||
};
|
||||
|
||||
struct ssh2_bpp_state {
|
||||
@ -34,6 +35,7 @@ struct ssh2_bpp_state {
|
||||
ssh_compressor *out_comp;
|
||||
|
||||
int pending_newkeys;
|
||||
int pending_compression, seen_userauth_success;
|
||||
|
||||
BinaryPacketProtocol bpp;
|
||||
};
|
||||
@ -90,7 +92,7 @@ void ssh2_bpp_new_outgoing_crypto(
|
||||
BinaryPacketProtocol *bpp,
|
||||
const struct ssh2_cipheralg *cipher, const void *ckey, const void *iv,
|
||||
const struct ssh2_macalg *mac, int etm_mode, const void *mac_key,
|
||||
const struct ssh_compression_alg *compression)
|
||||
const struct ssh_compression_alg *compression, int delayed_compression)
|
||||
{
|
||||
struct ssh2_bpp_state *s;
|
||||
assert(bpp->vt == &ssh2_bpp_vtable);
|
||||
@ -134,20 +136,31 @@ void ssh2_bpp_new_outgoing_crypto(
|
||||
s->out.mac = NULL;
|
||||
}
|
||||
|
||||
/* 'compression' is always non-NULL, because no compression is
|
||||
* indicated by ssh_comp_none. But this setup call may return a
|
||||
* null out_comp. */
|
||||
s->out_comp = ssh_compressor_new(compression);
|
||||
if (s->out_comp)
|
||||
bpp_logevent(("Initialised %s compression",
|
||||
ssh_compressor_alg(s->out_comp)->text_name));
|
||||
if (delayed_compression && !s->seen_userauth_success) {
|
||||
s->out.pending_compression = compression;
|
||||
s->out_comp = NULL;
|
||||
|
||||
bpp_logevent(("Will enable %s compression after user authentication",
|
||||
s->out.pending_compression->text_name));
|
||||
} else {
|
||||
s->out.pending_compression = NULL;
|
||||
|
||||
/* 'compression' is always non-NULL, because no compression is
|
||||
* indicated by ssh_comp_none. But this setup call may return a
|
||||
* null out_comp. */
|
||||
s->out_comp = ssh_compressor_new(compression);
|
||||
|
||||
if (s->out_comp)
|
||||
bpp_logevent(("Initialised %s compression",
|
||||
ssh_compressor_alg(s->out_comp)->text_name));
|
||||
}
|
||||
}
|
||||
|
||||
void ssh2_bpp_new_incoming_crypto(
|
||||
BinaryPacketProtocol *bpp,
|
||||
const struct ssh2_cipheralg *cipher, const void *ckey, const void *iv,
|
||||
const struct ssh2_macalg *mac, int etm_mode, const void *mac_key,
|
||||
const struct ssh_compression_alg *compression)
|
||||
const struct ssh_compression_alg *compression, int delayed_compression)
|
||||
{
|
||||
struct ssh2_bpp_state *s;
|
||||
assert(bpp->vt == &ssh2_bpp_vtable);
|
||||
@ -185,19 +198,39 @@ void ssh2_bpp_new_incoming_crypto(
|
||||
s->in.mac = NULL;
|
||||
}
|
||||
|
||||
/* 'compression' is always non-NULL, because no compression is
|
||||
* indicated by ssh_comp_none. But this setup call may return a
|
||||
* null in_decomp. */
|
||||
s->in_decomp = ssh_decompressor_new(compression);
|
||||
if (s->in_decomp)
|
||||
bpp_logevent(("Initialised %s decompression",
|
||||
ssh_decompressor_alg(s->in_decomp)->text_name));
|
||||
if (delayed_compression && !s->seen_userauth_success) {
|
||||
s->in.pending_compression = compression;
|
||||
s->in_decomp = NULL;
|
||||
|
||||
bpp_logevent(("Will enable %s decompression after user authentication",
|
||||
s->in.pending_compression->text_name));
|
||||
} else {
|
||||
s->in.pending_compression = NULL;
|
||||
|
||||
/* 'compression' is always non-NULL, because no compression is
|
||||
* indicated by ssh_comp_none. But this setup call may return a
|
||||
* null in_decomp. */
|
||||
s->in_decomp = ssh_decompressor_new(compression);
|
||||
|
||||
if (s->in_decomp)
|
||||
bpp_logevent(("Initialised %s decompression",
|
||||
ssh_decompressor_alg(s->in_decomp)->text_name));
|
||||
}
|
||||
|
||||
/* Clear the pending_newkeys flag, so that handle_input below will
|
||||
* start consuming the input data again. */
|
||||
s->pending_newkeys = FALSE;
|
||||
}
|
||||
|
||||
int ssh2_bpp_rekey_inadvisable(BinaryPacketProtocol *bpp)
|
||||
{
|
||||
struct ssh2_bpp_state *s;
|
||||
assert(bpp->vt == &ssh2_bpp_vtable);
|
||||
s = container_of(bpp, struct ssh2_bpp_state, bpp);
|
||||
|
||||
return s->pending_compression;
|
||||
}
|
||||
|
||||
#define BPP_READ(ptr, len) do \
|
||||
{ \
|
||||
crMaybeWaitUntilV(s->bpp.input_eof || \
|
||||
@ -207,6 +240,8 @@ void ssh2_bpp_new_incoming_crypto(
|
||||
goto eof; \
|
||||
} while (0)
|
||||
|
||||
#define userauth_range(pkttype) ((unsigned)((pkttype) - 50) < 20)
|
||||
|
||||
static void ssh2_bpp_handle_input(BinaryPacketProtocol *bpp)
|
||||
{
|
||||
struct ssh2_bpp_state *s = container_of(bpp, struct ssh2_bpp_state, bpp);
|
||||
@ -537,6 +572,74 @@ static void ssh2_bpp_handle_input(BinaryPacketProtocol *bpp)
|
||||
*/
|
||||
s->pending_newkeys = TRUE;
|
||||
crWaitUntilV(!s->pending_newkeys);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (s->pending_compression && userauth_range(type)) {
|
||||
/*
|
||||
* Another one: if we were configured with OpenSSH's
|
||||
* deferred compression which is triggered on receipt
|
||||
* of USERAUTH_SUCCESS, and we're currently
|
||||
* anticipating the next packet perhaps _being_
|
||||
* USERAUTH_SUCCESS, then we do some special handling.
|
||||
*/
|
||||
|
||||
if (type == SSH2_MSG_USERAUTH_SUCCESS) {
|
||||
/*
|
||||
* Success! This is the moment to turn on
|
||||
* compression.
|
||||
*/
|
||||
s->pending_compression = FALSE;
|
||||
s->in_decomp =
|
||||
ssh_decompressor_new(s->in.pending_compression);
|
||||
s->out_comp =
|
||||
ssh_compressor_new(s->out.pending_compression);
|
||||
s->in.pending_compression = NULL;
|
||||
s->out.pending_compression = NULL;
|
||||
|
||||
if (s->out_comp)
|
||||
bpp_logevent(("Initialised delayed %s compression",
|
||||
ssh_compressor_alg(
|
||||
s->out_comp)->text_name));
|
||||
if (s->in_decomp)
|
||||
bpp_logevent(("Initialised delayed %s decompression",
|
||||
ssh_decompressor_alg(
|
||||
s->in_decomp)->text_name));
|
||||
|
||||
/*
|
||||
* Also, since we will have temporarily disabled
|
||||
* output queue processing (for fear of having
|
||||
* some asynchronous thing like an IGNORE message
|
||||
* cross in transit with USERAUTH_SUCCESS coming
|
||||
* the other way, leaving its compression status
|
||||
* in doubt), we should schedule a run of the
|
||||
* output queue now, to release any pending
|
||||
* packets.
|
||||
*/
|
||||
queue_idempotent_callback(&s->bpp.ic_out_pq);
|
||||
} else {
|
||||
/*
|
||||
* This message indicates that we're not about to
|
||||
* see USERAUTH_SUCCESS (i.e. turn on compression)
|
||||
* just yet, so we turn off the outgoing packet
|
||||
* blockage and release any queued output packets,
|
||||
* so that we can make another attempt to
|
||||
* authenticate.
|
||||
*/
|
||||
s->pending_compression = FALSE;
|
||||
queue_idempotent_callback(&s->bpp.ic_out_pq);
|
||||
}
|
||||
}
|
||||
|
||||
if (type == SSH2_MSG_USERAUTH_SUCCESS) {
|
||||
/*
|
||||
* Whether or not we were doing delayed compression in
|
||||
* _this_ set of crypto parameters, we should set a
|
||||
* flag indicating that we're now authenticated, so
|
||||
* that a delayed compression method enabled in any
|
||||
* future rekey will be treated as un-delayed.
|
||||
*/
|
||||
s->seen_userauth_success = TRUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -734,6 +837,31 @@ static void ssh2_bpp_handle_output(BinaryPacketProtocol *bpp)
|
||||
{
|
||||
struct ssh2_bpp_state *s = container_of(bpp, struct ssh2_bpp_state, bpp);
|
||||
PktOut *pkt;
|
||||
int n_userauth;
|
||||
|
||||
/*
|
||||
* Count the userauth packets in the queue.
|
||||
*/
|
||||
n_userauth = 0;
|
||||
for (pkt = pq_first(&s->bpp.out_pq); pkt != NULL;
|
||||
pkt = pq_next(&s->bpp.out_pq, pkt))
|
||||
if (userauth_range(pkt->type))
|
||||
n_userauth++;
|
||||
|
||||
if (s->pending_compression && !n_userauth) {
|
||||
/*
|
||||
* We're currently blocked from sending any outgoing packets
|
||||
* until the other end tells us whether we're going to have to
|
||||
* enable compression or not.
|
||||
*
|
||||
* If our end has pushed a userauth packet on the queue, that
|
||||
* must mean it knows that a USERAUTH_SUCCESS is not
|
||||
* immediately forthcoming, so we unblock ourselves and send
|
||||
* up to and including that packet. But in this if statement,
|
||||
* there aren't any, so we're still blocked.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
if (s->cbc_ignore_workaround) {
|
||||
/*
|
||||
@ -761,7 +889,23 @@ static void ssh2_bpp_handle_output(BinaryPacketProtocol *bpp)
|
||||
}
|
||||
|
||||
while ((pkt = pq_pop(&s->bpp.out_pq)) != NULL) {
|
||||
if (userauth_range(pkt->type))
|
||||
n_userauth--;
|
||||
|
||||
ssh2_bpp_format_packet(s, pkt);
|
||||
ssh_free_pktout(pkt);
|
||||
|
||||
if (n_userauth == 0 &&
|
||||
(s->out.pending_compression || s->in.pending_compression)) {
|
||||
/*
|
||||
* This is the last userauth packet in the queue, so
|
||||
* unless our side decides to send another one in future,
|
||||
* we have to assume it will potentially provoke
|
||||
* USERAUTH_SUCCESS. Block (non-userauth) outgoing packets
|
||||
* until we see the reply.
|
||||
*/
|
||||
s->pending_compression = TRUE;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user