commit 4c8c41b7a0
The problem with OpenSSH delayed compression is that the spec has a race condition. Compression is enabled when the server sends USERAUTH_SUCCESS. In the server->client direction, that's fine: the USERAUTH_SUCCESS packet is not itself compressed, and the next packet in the same direction is. But in the client->server direction, this specification relies on there being a moment of half-duplex in the connection: the client can't send any outgoing packet _after_ whatever userauth packet the USERAUTH_SUCCESS was a response to, and _before_ finding out whether the response is USERAUTH_SUCCESS or something else. If it emitted, say, an SSH_MSG_IGNORE or initiated a rekey (perhaps due to a timeout), then that might cross in the network with USERAUTH_SUCCESS and the server wouldn't be able to know whether to treat it as compressed.

My previous solution was to note the presence of delayed compression options in the server KEXINIT, but not to negotiate them in the initial key exchange. Instead, we conduct the userauth exchange with compression="none", and then once userauth has concluded, we trigger an immediate rekey in which we do accept delayed compression methods - because of course by that time they're no different from the non-delayed versions. And that means compression is enabled by the bidirectional NEWKEYS exchange, which lacks that race condition.

I think OpenSSH itself gets away with this because its layer structure is structured so as to never send any such asynchronous transport-layer message in the middle of userauth. Ours is not. But my cunning plan is that now that my BPP abstraction includes a queue of packets to be sent and a callback that processes that queue on to the output raw data bufchain, it's possible to make that callback terminate early, to leave any dangerous transport-layer messages unsent while we wait for a userauth response.

Specifically: if we've negotiated a delayed compression method and not yet seen USERAUTH_SUCCESS, then ssh2_bpp_handle_output will emit all packets from its queue up to and including the last one in the userauth type-code range, and keep back any further ones. The idea is that _if_ that last userauth message was one that might provoke USERAUTH_SUCCESS, we don't want to send any difficult things after it; if it's not (e.g. it's in the middle of some ongoing userauth process like k-i or GSS) then the userauth layer will know that, and will emit some further userauth packet on its own initiative which will clue us in that it's OK to release everything up to and including that one.

(So in particular it wasn't even necessary to forbid _all_ transport-layer packets during userauth. I could have done that by reordering the output queue - packets in that queue haven't been assigned their sequence numbers yet, so that would have been safe - but it's more elegant not to have to.)

One particular case we do have to be careful about is not trying to initiate a _rekey_ during userauth, if delayed compression is in the offing. That's because when we start rekeying, ssh2transport stops sending any higher-layer packets at all, to discourage servers from trying to ignore the KEXINIT and press on regardless - you don't get your higher-layer replies until you actually respond to the lower-layer interrupt.
But in this case, if ssh2transport sent a KEXINIT, which ssh2bpp kept back in the queue to avoid a delayed compression race and would only send if another userauth packet followed it, which ssh2transport would never pass on to ssh2bpp's output queue, there'd be a complete protocol deadlock. So instead I defer any attempt to start a rekey until after userauth finishes (using the existing system for starting a deferred rekey at that moment, which was previously used for the _old_ delayed-compression strategy, and still has to be here anyway for GSSAPI purposes).
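
As a rough sketch of that hold-back rule (illustrative only, not the real ssh2bpp code: struct ssh2_bpp_state, is_userauth_type and the queue helpers first_queued/next_queued/dequeue_head/send_packet are simplified stand-ins):

    /* Simplified sketch of the hold-back rule described above. */
    static int is_userauth_type(int type)
    {
        /* SSH-2 message numbers 50-79 are the userauth range */
        return type >= 50 && type <= 79;
    }

    static void handle_output(struct ssh2_bpp_state *s)
    {
        PktOut *pkt, *last_userauth = NULL;

        if (s->pending_compression) {
            /* Find the last userauth-range packet currently queued;
             * we may send up to and including that one, nothing after. */
            for (pkt = first_queued(s); pkt; pkt = next_queued(s, pkt))
                if (is_userauth_type(pkt->type))
                    last_userauth = pkt;
        }

        while (first_queued(s)) {
            if (s->pending_compression && !last_userauth)
                break;                /* keep the rest back for now */
            pkt = dequeue_head(s);
            send_packet(s, pkt);      /* format, encrypt, emit on out_raw */
            if (pkt == last_userauth)
                last_userauth = NULL;
        }
    }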
144 lines
5.8 KiB
C
/*
 * Abstraction of the binary packet protocols used in SSH.
 */

#ifndef PUTTY_SSHBPP_H
#define PUTTY_SSHBPP_H

struct BinaryPacketProtocolVtable {
    void (*free)(BinaryPacketProtocol *);
    void (*handle_input)(BinaryPacketProtocol *);
    void (*handle_output)(BinaryPacketProtocol *);
    PktOut *(*new_pktout)(int type);
    void (*queue_disconnect)(BinaryPacketProtocol *,
                             const char *msg, int category);
};

struct BinaryPacketProtocol {
    const struct BinaryPacketProtocolVtable *vt;
    bufchain *in_raw, *out_raw;
    int input_eof;   /* set this if in_raw will never be added to again */
    PktInQueue in_pq;
    PktOutQueue out_pq;
    PacketLogSettings *pls;
    LogContext *logctx;
    Ssh *ssh;
    Frontend *frontend;

    /* ic_in_raw is filled in by the BPP (probably by calling
     * ssh_bpp_common_setup). The BPP's owner triggers it when data is
     * added to in_raw, and also when the BPP is newly created. */
    IdempotentCallback ic_in_raw;

    /* ic_out_pq is entirely internal to the BPP itself; it's used as
     * the callback on out_pq. */
    IdempotentCallback ic_out_pq;

    int remote_bugs;

    /* Set this if remote connection closure should not generate an
     * error message (either because it's not to be treated as an
     * error at all, or because some other error message has already
     * been emitted). */
    int expect_close;
};

#define ssh_bpp_handle_input(bpp) ((bpp)->vt->handle_input(bpp))
#define ssh_bpp_handle_output(bpp) ((bpp)->vt->handle_output(bpp))
#define ssh_bpp_new_pktout(bpp, type) ((bpp)->vt->new_pktout(type))
#define ssh_bpp_queue_disconnect(bpp, msg, cat) \
    ((bpp)->vt->queue_disconnect(bpp, msg, cat))
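
/*
 * Illustrative sketch, not part of the original header: a concrete
 * BPP fills in a vtable instance, and callers dispatch through the
 * macros above without knowing the concrete type. All 'example_*'
 * names here are hypothetical.
 */
#if 0
static void example_bpp_free(BinaryPacketProtocol *bpp);
static void example_bpp_handle_input(BinaryPacketProtocol *bpp);
static void example_bpp_handle_output(BinaryPacketProtocol *bpp);
static PktOut *example_bpp_new_pktout(int type);
static void example_bpp_queue_disconnect(BinaryPacketProtocol *bpp,
                                         const char *msg, int category);

static const struct BinaryPacketProtocolVtable example_bpp_vtable = {
    example_bpp_free,
    example_bpp_handle_input,
    example_bpp_handle_output,
    example_bpp_new_pktout,
    example_bpp_queue_disconnect,
};
/* The owner then dispatches e.g. ssh_bpp_handle_input(bpp) whenever
 * new raw data arrives on in_raw. */
#endif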

/* ssh_bpp_free is more than just a macro wrapper on the vtable; it
 * does centralised parts of the freeing too. */
void ssh_bpp_free(BinaryPacketProtocol *bpp);

BinaryPacketProtocol *ssh1_bpp_new(Frontend *frontend);
void ssh1_bpp_new_cipher(BinaryPacketProtocol *bpp,
                         const struct ssh1_cipheralg *cipher,
                         const void *session_key);
/* requested_compression() notifies the SSH-1 BPP that we've just sent
 * a request to enable compression, which means that on receiving the
 * next SSH1_SMSG_SUCCESS or SSH1_SMSG_FAILURE message, it should set
 * up zlib compression if it was SUCCESS. */
void ssh1_bpp_requested_compression(BinaryPacketProtocol *bpp);
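
/*
 * Illustrative usage sketch, not part of the original header: the
 * SSH-1 layer above would call this right after queueing its
 * compression request, so the BPP knows how to interpret the server's
 * next SUCCESS/FAILURE reply. The queueing details shown here are
 * simplified.
 */
#if 0
pktout = ssh_bpp_new_pktout(bpp, SSH1_CMSG_REQUEST_COMPRESSION);
put_uint32(pktout, 6);                 /* requested zlib level */
pq_push(&bpp->out_pq, pktout);
ssh1_bpp_requested_compression(bpp);   /* arm for the next reply */
#endif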

/* Helper routine which does common BPP initialisation, e.g. setting
 * up in_pq and out_pq, and initialising input_consumer. */
void ssh_bpp_common_setup(BinaryPacketProtocol *);

/* Common helper functions between the SSH-2 full and bare BPPs */
void ssh2_bpp_queue_disconnect(BinaryPacketProtocol *bpp,
                               const char *msg, int category);
int ssh2_bpp_check_unimplemented(BinaryPacketProtocol *bpp, PktIn *pktin);

/*
 * Structure that tracks how much data is sent and received, for
 * purposes of triggering an SSH-2 rekey when either one gets over a
 * configured limit. In each direction, the flag 'running' indicates
 * that we haven't hit the limit yet, and 'remaining' tracks how much
 * longer until we do. The macro DTS_CONSUME subtracts a given amount
 * from the counter in a particular direction, and evaluates to a
 * boolean indicating whether the limit has been hit.
 *
 * The limit is sticky: once 'running' has flipped to false,
 * 'remaining' is no longer decremented, so it shouldn't dangerously
 * wrap round.
 */
struct DataTransferStats {
    struct {
        int running;
        unsigned long remaining;
    } in, out;
};
#define DTS_CONSUME(stats, direction, size) \
    ((stats)->direction.running && \
     (stats)->direction.remaining <= (size) ? \
     ((stats)->direction.running = FALSE, TRUE) : \
     ((stats)->direction.remaining -= (size), FALSE))
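
/*
 * Illustrative usage sketch, not part of the original header:
 * REKEY_LIMIT and start_rekey are hypothetical stand-ins for however
 * the transport layer configures and reacts to the limit.
 */
#if 0
struct DataTransferStats stats;
stats.in.running = stats.out.running = TRUE;
stats.in.remaining = stats.out.remaining = REKEY_LIMIT;

/* After transmitting a packet of 'len' bytes: */
if (DTS_CONSUME(&stats, out, len))
    start_rekey("too much data sent since last rekey");
#endif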

BinaryPacketProtocol *ssh2_bpp_new(
    Frontend *frontend, struct DataTransferStats *stats);
void ssh2_bpp_new_outgoing_crypto(
    BinaryPacketProtocol *bpp,
    const struct ssh2_cipheralg *cipher, const void *ckey, const void *iv,
    const struct ssh2_macalg *mac, int etm_mode, const void *mac_key,
    const struct ssh_compression_alg *compression, int delayed_compression);
void ssh2_bpp_new_incoming_crypto(
    BinaryPacketProtocol *bpp,
    const struct ssh2_cipheralg *cipher, const void *ckey, const void *iv,
    const struct ssh2_macalg *mac, int etm_mode, const void *mac_key,
    const struct ssh_compression_alg *compression, int delayed_compression);

/*
 * A query method specific to the interface between ssh2transport and
 * ssh2bpp. If true, it indicates that we're potentially in the
 * race-condition-prone part of delayed compression setup and so
 * asynchronous outgoing transport-layer packets are currently not
 * being sent, which means in particular that it would be a bad idea
 * to start a rekey because then we'd stop responding to anything
 * _other_ than transport-layer packets and deadlock the protocol.
 */
int ssh2_bpp_rekey_inadvisable(BinaryPacketProtocol *bpp);
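
/*
 * Illustrative usage sketch, not part of the original header: before
 * sending KEXINIT, ssh2transport can consult this query and defer the
 * rekey until after userauth instead, avoiding the deadlock described
 * in the commit message above. The field names here are simplified
 * stand-ins.
 */
#if 0
if (ssh2_bpp_rekey_inadvisable(s->bpp)) {
    s->deferred_rekey_reason = reason;   /* retried once userauth ends */
} else {
    /* safe to send KEXINIT and begin the rekey now */
}
#endif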

BinaryPacketProtocol *ssh2_bare_bpp_new(Frontend *frontend);

/*
 * The initial code to handle the SSH version exchange is also
 * structured as an implementation of BinaryPacketProtocol, because
 * that makes it easy to switch from that to the next BPP once it
 * tells us which one we're using.
 */
struct ssh_version_receiver {
    void (*got_ssh_version)(struct ssh_version_receiver *rcv,
                            int major_version);
};
BinaryPacketProtocol *ssh_verstring_new(
    Conf *conf, Frontend *frontend, int bare_connection_mode,
    const char *protoversion, struct ssh_version_receiver *rcv);
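
/*
 * Illustrative sketch, not part of the original header: the owner
 * implements got_ssh_version to swap the verstring BPP for the real
 * one once the version exchange has settled which protocol is in use.
 * The container_of-style pointer recovery and the field names are
 * hypothetical.
 */
#if 0
static void got_ssh_version(struct ssh_version_receiver *rcv,
                            int major_version)
{
    Ssh *ssh = container_of(rcv, Ssh, version_receiver);
    ssh_bpp_free(ssh->bpp);    /* done with the verstring BPP */
    ssh->bpp = (major_version == 2 ?
                ssh2_bpp_new(ssh->frontend, &ssh->stats) :
                ssh1_bpp_new(ssh->frontend));
}
#endif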
const char *ssh_verstring_get_remote(BinaryPacketProtocol *);
const char *ssh_verstring_get_local(BinaryPacketProtocol *);
int ssh_verstring_get_bugs(BinaryPacketProtocol *);

#endif /* PUTTY_SSHBPP_H */