/*
 * Terminal emulator.
 */

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <limits.h>
#include <wchar.h>

#include <time.h>
#include <assert.h>
#include "putty.h"
#include "terminal.h"

#define VT52_PLUS

#define CL_ANSIMIN 0x0001 /* Codes in all ANSI like terminals. */
#define CL_VT100 0x0002 /* VT100 */
#define CL_VT100AVO 0x0004 /* VT100 +AVO; 132x24 (not 132x14) & attrs */
#define CL_VT102 0x0008 /* VT102 */
#define CL_VT220 0x0010 /* VT220 */
#define CL_VT320 0x0020 /* VT320 */
#define CL_VT420 0x0040 /* VT420 */
#define CL_VT510 0x0080 /* VT510, NB VT510 includes ANSI */
#define CL_VT340TEXT 0x0100 /* VT340 extensions that appear in the VT420 */
#define CL_SCOANSI 0x1000 /* SCOANSI not in ANSIMIN. */
#define CL_ANSI 0x2000 /* ANSI ECMA-48 not in the VT100..VT420 */
#define CL_OTHER 0x4000 /* Others, Xterm, linux, putty, dunno, etc */

#define TM_VT100 (CL_ANSIMIN|CL_VT100)
#define TM_VT100AVO (TM_VT100|CL_VT100AVO)
#define TM_VT102 (TM_VT100AVO|CL_VT102)
#define TM_VT220 (TM_VT102|CL_VT220)
#define TM_VTXXX (TM_VT220|CL_VT340TEXT|CL_VT510|CL_VT420|CL_VT320)
#define TM_SCOANSI (CL_ANSIMIN|CL_SCOANSI)

#define TM_PUTTY (0xFFFF)
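
/*
 * Illustrative example: a front end wanting plain VT102 behaviour
 * would set
 *
 *     term->compatibility_level = TM_VT102;
 *
 * while TM_PUTTY (0xFFFF) turns on every feature class above. The
 * individual CL_ bits are what the compatibility() macros below test.
 */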

#define UPDATE_DELAY ((TICKSPERSEC+49)/50) /* ticks to defer window update */
#define TBLINK_DELAY ((TICKSPERSEC*9+19)/20) /* ticks between text blinks */
#define CBLINK_DELAY (CURSORBLINK) /* ticks between cursor blinks */
#define VBELL_DELAY (VBELL_TIMEOUT) /* visual bell timeout in ticks */
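
/*
 * Worked example, assuming TICKSPERSEC counts milliseconds (1000
 * ticks per second): UPDATE_DELAY rounds up to 20 ticks, capping
 * deferred window updates at roughly 50Hz, and TBLINK_DELAY comes to
 * 450 ticks between text-blink phase changes.
 */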

#define compatibility(x) \
    if ( ((CL_##x)&term->compatibility_level) == 0 ) { \
        term->termstate=TOPLEVEL; \
        break; \
    }
#define compatibility2(x,y) \
    if ( ((CL_##x|CL_##y)&term->compatibility_level) == 0 ) { \
        term->termstate=TOPLEVEL; \
        break; \
    }

#define has_compat(x) ( ((CL_##x)&term->compatibility_level) != 0 )
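
/*
 * Usage sketch, inferred from the shape of the macros above: each one
 * is meant to sit at the top of a case in the escape-sequence switch,
 * for instance
 *
 *     case 'c':
 *         compatibility(VT100);
 *         ...
 *         break;
 *
 * so that a sequence the current emulation level doesn't support
 * resets termstate to TOPLEVEL and abandons the handler early; the
 * embedded 'break' is why these are macros rather than functions.
 */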

static const char *const EMPTY_WINDOW_TITLE = "";

static const char sco2ansicolour[] = { 0, 4, 2, 6, 1, 5, 3, 7 };

#define sel_nl_sz (sizeof(sel_nl)/sizeof(wchar_t))
static const wchar_t sel_nl[] = SEL_NL;

/* forward declaration */
static void term_userpass_state_free(struct term_userpass_state *s);

/*
 * Fetch the character at a particular position in a line array,
 * for purposes of `wordtype'. The reason this isn't just a simple
 * array reference is that if the character we find is UCSWIDE,
 * then we must look one space further to the left.
 */
#define UCSGET(a, x) \
( (x)>0 && (a)[(x)].chr == UCSWIDE ? (a)[(x)-1].chr : (a)[(x)].chr )
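
/*
 * Illustrative example: a double-width character occupying cells 4
 * and 5 of a termchar array `chars' stores UCSWIDE in cell 5, so
 * UCSGET(chars, 5) returns the real character from cell 4 rather
 * than the UCSWIDE placeholder.
 */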

/*
 * Detect the various aliases of U+0020 SPACE.
 */
#define IS_SPACE_CHR(chr) \
    ((chr) == 0x20 || (DIRECT_CHAR(chr) && ((chr) & 0xFF) == 0x20))

/*
 * Spot magic CSETs.
 */
#define CSET_OF(chr) (DIRECT_CHAR(chr)||DIRECT_FONT(chr) ? (chr)&CSET_MASK : 0)
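
/*
 * Note (assumption based on the encoding in terminal.h): DIRECT_CHAR
 * and DIRECT_FONT cells keep a character-set identifier in the bits
 * covered by CSET_MASK, which is what CSET_OF() extracts; ordinary
 * Unicode cells simply yield 0.
 */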

/*
 * Internal prototypes.
 */
static void resizeline(Terminal *, termline *, int);
static termline *lineptr(Terminal *, int, int);
static void check_line_size(Terminal *, termline *);
static void do_paint(Terminal *);
static void erase_lots(Terminal *, bool, bool, bool);
static int find_last_nonempty_line(Terminal *, tree234 *);
static void swap_screen(Terminal *, int, bool, bool);
static void update_sbar(Terminal *);
static void deselect(Terminal *);
static void term_print_finish(Terminal *);
static void scroll(Terminal *, int, int, int, bool);
static void parse_optionalrgb(optionalrgb *out, unsigned *values);
static void term_added_data(Terminal *term, bool);
static void term_update_raw_mouse_mode(Terminal *term);
static void term_out_cb(void *);

static termline *newtermline(Terminal *term, int cols, bool bce)
{
    termline *line;
    int j;

    line = snew(termline);
    line->chars = snewn(cols, termchar);
    for (j = 0; j < cols; j++)
        line->chars[j] = (bce ? term->erase_char : term->basic_erase_char);
    line->cols = line->size = cols;
    line->lattr = LATTR_NORM;
    line->trusted = false;
    line->temporary = false;
    line->cc_free = 0;

    return line;
}
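
/*
 * Illustrative note: a new line is filled with the terminal's erase
 * character (the background-colour-erase variant when bce is true),
 * and cc_free == 0 records that it has no spare combining-character
 * cells yet; add_cc() below grows the chars array past 'cols' the
 * first time one is needed.
 */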

static void freetermline(termline *line)
{
    if (line) {
        sfree(line->chars);
        sfree(line);
    }
}

void term_release_line(termline *line)
{
    if (line->temporary)
        freetermline(line);
}
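
/*
 * Illustrative note: 'temporary' marks lines that were decompressed
 * on demand from the packed scrollback, so term_release_line() frees
 * those, while lines owned by the live screen or the scrollback tree
 * are left for their owner to manage.
 */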

const int colour_indices_conf_to_oscp[CONF_NCOLOURS] = {
    #define COLOUR_ENTRY(id,name) OSCP_COLOUR_##id,
    CONF_COLOUR_LIST(COLOUR_ENTRY)
    #undef COLOUR_ENTRY
};

const int colour_indices_conf_to_osc4[CONF_NCOLOURS] = {
    #define COLOUR_ENTRY(id,name) OSC4_COLOUR_##id,
    CONF_COLOUR_LIST(COLOUR_ENTRY)
    #undef COLOUR_ENTRY
};

const int colour_indices_oscp_to_osc4[OSCP_NCOLOURS] = {
    #define COLOUR_ENTRY(id) OSC4_COLOUR_##id,
    OSCP_COLOUR_LIST(COLOUR_ENTRY)
    #undef COLOUR_ENTRY
};
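
/*
 * Illustrative note: CONF_COLOUR_LIST and OSCP_COLOUR_LIST are
 * X-macro lists defined in the headers, so each table above expands
 * to one entry per listed colour, giving a direct translation between
 * the CONF_, OSCP_ and OSC4_ colour numbering schemes.
 */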

#ifdef TERM_CC_DIAGS
/*
 * Diagnostic function: verify that a termline has a correct
 * combining character structure.
 *
 * This is a performance-intensive check, so it's no longer enabled
 * by default.
 */
static void cc_check(termline *line)
{
    unsigned char *flags;
    int i, j;

    assert(line->size >= line->cols);

    flags = snewn(line->size, unsigned char);

    for (i = 0; i < line->size; i++)
        flags[i] = (i < line->cols);

    for (i = 0; i < line->cols; i++) {
        j = i;
        while (line->chars[j].cc_next) {
            j += line->chars[j].cc_next;
            assert(j >= line->cols && j < line->size);
            assert(!flags[j]);
            flags[j] = true;
        }
    }

    j = line->cc_free;
    if (j) {
        while (1) {
            assert(j >= line->cols && j < line->size);
            assert(!flags[j]);
            flags[j] = true;
            if (line->chars[j].cc_next)
                j += line->chars[j].cc_next;
            else
                break;
        }
    }

    j = 0;
    for (i = 0; i < line->size; i++)
        j += (flags[i] != 0);

    assert(j == line->size);

    sfree(flags);
}
#endif

static void clear_cc(termline *line, int col);

/*
 * Add a combining character to a character cell.
 */
static void add_cc(termline *line, int col, unsigned long chr)
{
    int newcc;

    assert(col >= 0 && col < line->cols);

    /*
     * Don't add combining characters at all to U+FFFD REPLACEMENT
     * CHARACTER. (Partly it's a slightly incoherent idea in the first
     * place; mostly, U+FFFD is what we generate if a cell already has
     * too many ccs, in which case we want it to be a fixed point when
     * further ccs are added.)
     */
    if (line->chars[col].chr == 0xFFFD)
        return;

    /*
     * Walk the cc list of the cell in question to find its current
     * end point.
     */
    size_t ncc = 0;
    int origcol = col;
    while (line->chars[col].cc_next) {
        col += line->chars[col].cc_next;
        if (++ncc >= CC_LIMIT) {
            /*
             * There are already too many combining characters in this
             * character cell. Change strategy: throw out the entire
             * chain and replace the main character with U+FFFD.
             *
             * (Rationale: extrapolating from UTR #36 section 3.6.2
             * suggests the principle that it's better to substitute
             * U+FFFD than to _ignore_ input completely. Also, if the
             * user copies and pastes an overcombined character cell,
             * this way it will clearly indicate that we haven't
             * reproduced the writer's original intentions, instead of
             * looking as if it was the _writer's_ fault that the 33rd
             * cc is missing.)
             *
             * Per the code above, this will also prevent any further
             * ccs from being added to this cell.
             */
            clear_cc(line, origcol);
            line->chars[origcol].chr = 0xFFFD;
            return;
        }
    }

    /*
     * Extend the cols array if the free list is empty.
     */
    if (!line->cc_free) {
        int n = line->size;

        size_t tmpsize = line->size;
        sgrowarray(line->chars, tmpsize, tmpsize);
        assert(tmpsize <= INT_MAX);
        line->size = tmpsize;

        line->cc_free = n;
        while (n < line->size) {
            if (n+1 < line->size)
                line->chars[n].cc_next = 1;
            else
                line->chars[n].cc_next = 0;
            n++;
        }
    }

    /*
     * `col' now points at the last cc currently in this cell; so
     * we simply add another one.
     */
    newcc = line->cc_free;
    if (line->chars[newcc].cc_next)
        line->cc_free = newcc + line->chars[newcc].cc_next;
    else
        line->cc_free = 0;
    line->chars[newcc].cc_next = 0;
    line->chars[newcc].chr = chr;
    line->chars[col].cc_next = newcc - col;

#ifdef TERM_CC_DIAGS
    cc_check(line);
#endif
}
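
To make the cc-list structure above concrete, here is a minimal sketch of a
walker over it (count_ccs is a hypothetical helper, not part of terminal.c;
it only relies on the termline/termchar fields used above):

static int count_ccs(termline *line, int col)
{
    int n = 0;
    /* Combining characters live in the spare cells beyond line->cols;
     * cc_next is a relative offset, and zero means "end of chain". */
    while (line->chars[col].cc_next) {
        col += line->chars[col].cc_next;
        n++;
    }
    return n;
}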

/*
 * Clear the combining character list in a character cell.
 */
static void clear_cc(termline *line, int col)
{
    int oldfree, origcol = col;

    assert(col >= 0 && col < line->cols);

    if (!line->chars[col].cc_next)
        return;                        /* nothing needs doing */

    oldfree = line->cc_free;
    line->cc_free = col + line->chars[col].cc_next;
    while (line->chars[col].cc_next)
        col += line->chars[col].cc_next;
    if (oldfree)
        line->chars[col].cc_next = oldfree - col;
    else
        line->chars[col].cc_next = 0;

    line->chars[origcol].cc_next = 0;

#ifdef TERM_CC_DIAGS
    cc_check(line);
#endif
}

/*
 * Compare two character cells for equality. Special case required
 * in do_paint() where we override what we expect the chr and attr
 * fields to be.
 */
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; now that those variables are bool and are assigned
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00

static bool termchars_equal_override(termchar *a, termchar *b,
                                     unsigned long bchr, unsigned long battr)
{
    /* FULL-TERMCHAR */
    if (!truecolour_equal(a->truecolour, b->truecolour))
        return false;
    if (a->chr != bchr)
        return false;
    if ((a->attr &~ DATTR_MASK) != (battr &~ DATTR_MASK))
        return false;
    while (a->cc_next || b->cc_next) {
        if (!a->cc_next || !b->cc_next)
            return false;              /* one cc-list ends, other does not */
        a += a->cc_next;
        b += b->cc_next;
        if (a->chr != b->chr)
            return false;
    }
    return true;
}

static bool termchars_equal(termchar *a, termchar *b)
{
    return termchars_equal_override(a, b, b->chr, b->attr);
}
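
A usage illustration only (check_cell_unchanged is a hypothetical helper, not
part of terminal.c): the override form lets a caller compare a freshly
computed cell against what it believes is on screen, supplying the expected
chr and attr from its own bookkeeping while the cc chains and true-colour
data are still compared the normal way.

static bool check_cell_unchanged(termline *newline, termline *oldline, int x,
                                 unsigned long shown_chr,
                                 unsigned long shown_attr)
{
    /* Compare column x of the new line against the cell we think is
     * currently displayed, overriding its expected chr and attr. */
    return termchars_equal_override(&newline->chars[x], &oldline->chars[x],
                                    shown_chr, shown_attr);
}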

/*
 * Copy a character cell. (Requires a pointer to the destination
 * termline, so as to access its free list.)
 */
static void copy_termchar(termline *destline, int x, termchar *src)
{
    clear_cc(destline, x);

    destline->chars[x] = *src;         /* copy everything except cc-list */
    destline->chars[x].cc_next = 0;    /* and make sure this is zero */

    while (src->cc_next) {
        src += src->cc_next;
        add_cc(destline, x, src->chr);
    }

#ifdef TERM_CC_DIAGS
    cc_check(destline);
#endif
}

/*
 * Move a character cell within its termline.
 */
static void move_termchar(termline *line, termchar *dest, termchar *src)
{
    /* First clear the cc list from the original char, just in case. */
    clear_cc(line, dest - line->chars);

    /* Move the character cell and adjust its cc_next. */
    *dest = *src;                      /* copy everything except cc-list */
    if (src->cc_next)
        dest->cc_next = src->cc_next - (dest-src);

    /* Ensure the original cell doesn't have a cc list. */
    src->cc_next = 0;

#ifdef TERM_CC_DIAGS
    cc_check(line);
#endif
}
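
/*
 * Worked example of the cc_next adjustment above (illustration only):
 * if src sits at index 10 with cc_next = +100, its first combining
 * character lives at index 110. After the cell is moved to dest at
 * index 3, dest->cc_next must become 100 - (3 - 10) = 107, so that
 * 3 + 107 still points at the same cc cell, index 110.
 */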
Build option to disable scrollback compression.
This was requested by a downstream of the code, who wanted to change
the time/space tradeoff in the terminal. I currently have no plans to
change this setting for upstream PuTTY, although there is a cmake
option for it just to make testing it easy.
To avoid sprinkling ifdefs over the whole terminal code, the strategy
is to keep the separate type 'compressed_scrollback_line', and turn it
into a typedef for a 'termline *'. So compressline() becomes almost
trivial, and decompressline() even more so.
Memory management is the fiddly part. To make this work sensibly on
both sides, I've broken up each of compressline() and decompressline()
into two versions, one of which takes ownership of (and logically
speaking frees) its input, and the other doesn't. So at call sites
where a function was followed by a free, it's now calling the
'and_free' version of the function, and where the input object was
reused afterwards, it's calling the 'no_free' version. This means that
in different branches of the #if, I can make one function call the
other or vice versa, and no call site is stuck with having to do
things in a more roundabout way than necessary.
The freeing of the _return_ value from decompressline() is handled for
us, because termlines already have a 'temporary' flag which is set
when they're returned from the decompressor, and anyone receiving a
termline from lineptr() calls unlineptr() when they're finished with
it, which will _conditionally_ free it, depending on that 'temporary'
flag. So in the new mode, 'temporary' is never set at all, and all
those unlineptr() calls do nothing.
However, we also still need to free compressed lines properly when
they're actually being thrown away (scrolled off the top of the
scrollback, or cleaned up in term_free), and for that, I've made a new
special-purpose free_compressed_line() function.
2022-11-20 10:55:33 +00:00

#ifndef NO_SCROLLBACK_COMPRESSION

Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00

/*
 * Compress and decompress a termline into an RLE-based format for
 * storing in scrollback. (Since scrollback almost never needs to
 * be modified and exists in huge quantities, this is a sensible
 * tradeoff, particularly since it allows us to continue adding
 * features to the main termchar structure without proportionally
 * bloating the terminal emulator's memory footprint unless those
 * features are in constant use.)
 */
static void makerle(strbuf *b, termline *ldata,
                    void (*makeliteral)(strbuf *b, termchar *c,
                                        unsigned long *state))
{
    int hdrpos, hdrsize, n, prevlen, prevpos, thislen, thispos;
    bool prev2;
    termchar *c = ldata->chars;
    unsigned long state = 0, oldstate;

    n = ldata->cols;

    hdrpos = b->len;
    hdrsize = 0;
    put_byte(b, 0);
    prevlen = prevpos = 0;
    prev2 = false;

    while (n-- > 0) {
        thispos = b->len;
        makeliteral(b, c++, &state);
        thislen = b->len - thispos;
        if (thislen == prevlen &&
            !memcmp(b->u + prevpos, b->u + thispos, thislen)) {
            /*
             * This literal precisely matches the previous one.
             * Turn it into a run if it's worthwhile.
             *
             * With one-byte literals, it costs us two bytes to
             * encode a run, plus another byte to write the header
             * to resume normal output; so a three-element run is
             * neutral, and anything beyond that is unconditionally
             * worthwhile. With two-byte literals or more, even a
             * 2-run is a win.
             */
            if (thislen > 1 || prev2) {
                int runpos, runlen;

                /*
                 * It's worth encoding a run. Start at prevpos,
                 * unless hdrsize==0 in which case we can back up
                 * another one and start by overwriting hdrpos.
                 */

                hdrsize--;             /* remove the literal at prevpos */
                if (prev2) {
                    assert(hdrsize > 0);
                    hdrsize--;
                    prevpos -= prevlen;/* and possibly another one */
                }

                if (hdrsize == 0) {
                    assert(prevpos == hdrpos + 1);
                    runpos = hdrpos;
                    strbuf_shrink_to(b, prevpos+prevlen);
                } else {
                    memmove(b->u + prevpos+1, b->u + prevpos, prevlen);
                    runpos = prevpos;
                    strbuf_shrink_to(b, prevpos+prevlen+1);
                    /*
                     * Terminate the previous run of ordinary
                     * literals.
                     */
                    assert(hdrsize >= 1 && hdrsize <= 128);
                    b->u[hdrpos] = hdrsize - 1;
                }

                runlen = prev2 ? 3 : 2;

                while (n > 0 && runlen < 129) {
                    int tmppos, tmplen;
                    tmppos = b->len;
                    oldstate = state;
                    makeliteral(b, c, &state);
                    tmplen = b->len - tmppos;
Fix memory corruption in scrollback compression.
Introduced by commit 5891142aee5c, in which I invented
strbuf_shrink_to() and went round replacing lots of assignments of the
form 'sb->len = smaller_value' with calls to it. The bug is also in
0.74, because 34a0460f0561 is a cherry-pick of the commit that
introduced it.
The difference between that assignment and strbuf_shrink_to is partly
that the latter checks by assertion that the new length really is
_smaller_ - it doesn't let you accidentally grow a strbuf's length
field beyond the limit of its buffer (or indeed at all). But also,
strbuf_shrink_to re-establishes the strbuf invariant that the text
logically in the buffer is always followed by a zero byte, so that
it's a valid C string.
Unfortunately, in one of the places I made this change, I was storing
binary data in the strbuf (so the terminating NUL is unimportant), and
immediately after decreasing the strbuf's length, I was doing a memcmp
one of whose arguments was the data I'd just chopped off the end of
the strbuf. So it _mattered_ that no random NUL had been splurged over
it.
Specifically, this happened in the run-length encoder used to compress
scrollback data, and had the effect that two components of the
compressed scrollback could be spuriously considered equal, if one of
them started with a legitimate zero byte and the other had a zero byte
written over it by this bug. Thanks to Michael Weller for a nice test
case that demonstrated a compressed scrollback line being decompressed
again as the wrong thing:
"NORMAL TEXT, \033[42mGREEN BACKGROUND\033[0m, NORMAL TEXT AGAIN"
If the above line is printed to the terminal (after being decoded as
if it was a C string literal), then only the words "GREEN BACKGROUND"
get a green background. But after that line is scrolled off the top of
the window, if you find it in the scrollback, then the rest of the
line to the right has also become green-backgrounded due to this bug.
2020-10-27 18:26:06 +00:00
                    bool match = tmplen == thislen &&
                        !memcmp(b->u + runpos+1, b->u + tmppos, tmplen);
                    strbuf_shrink_to(b, tmppos);
                    if (!match) {
                        state = oldstate;
                        break;         /* run over */
                    }
                    n--, c++, runlen++;
                }

                assert(runlen >= 2 && runlen <= 129);
                b->u[runpos] = runlen + 0x80 - 2;

                hdrpos = b->len;
                hdrsize = 0;
                put_byte(b, 0);
                /* And ensure this run doesn't interfere with the next. */
                prevlen = prevpos = 0;
                prev2 = false;

                continue;
            } else {
                /*
                 * Just flag that the previous two literals were
                 * identical, in case we find a third identical one
                 * we want to turn into a run.
                 */
                prev2 = true;
                prevlen = thislen;
                prevpos = thispos;
            }
        } else {
            prev2 = false;
            prevlen = thislen;
            prevpos = thispos;
        }

        /*
         * This character isn't (yet) part of a run. Add it to
         * hdrsize.
         */
        hdrsize++;
        if (hdrsize == 128) {
            b->u[hdrpos] = hdrsize - 1;
            hdrpos = b->len;
            hdrsize = 0;
            put_byte(b, 0);
            prevlen = prevpos = 0;
            prev2 = false;
        }
    }

    /*
     * Clean up.
     */
    if (hdrsize > 0) {
        assert(hdrsize <= 128);
        b->u[hdrpos] = hdrsize - 1;
    } else {
        strbuf_shrink_to(b, hdrpos);
    }
}
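
/*
 * Worked example of the framing makerle() emits (illustration only,
 * derived from the header-byte arithmetic above; 'L1', 'L2', 'L3' stand
 * for whatever byte sequences makeliteral produces for three distinct
 * cells). A header byte below 0x80 means "the next (header+1) literals
 * are plain copies"; a header byte of 0x80 or above means "the single
 * literal that follows repeats (header - 0x80 + 2) times". So a line
 * whose cells encode as L1 L2 L2 L2 L2 L2 L3 comes out as:
 *
 *   0x00 L1      one plain literal
 *   0x83 L2      a run of 0x83 - 0x80 + 2 = 5 copies of L2
 *   0x00 L3      one plain literal
 */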

static void makeliteral_chr(strbuf *b, termchar *c, unsigned long *state)
{
    /*
     * My encoding for characters is UTF-8-like, in that it stores
     * 7-bit ASCII in one byte and uses high-bit-set bytes as
     * introducers to indicate a longer sequence. However, it's
     * unlike UTF-8 in that it doesn't need to be able to
     * resynchronise, and therefore I don't want to waste two bits
     * per byte on having recognisable continuation characters.
     * Also I don't want to rule out the possibility that I may one
     * day use values 0x80000000-0xFFFFFFFF for interesting
     * purposes, so unlike UTF-8 I need a full 32-bit range.
     * Accordingly, here is my encoding:
     *
     * 00000000-0000007F: 0xxxxxxx (but see below)
     * 00000080-00003FFF: 10xxxxxx xxxxxxxx
     * 00004000-001FFFFF: 110xxxxx xxxxxxxx xxxxxxxx
     * 00200000-0FFFFFFF: 1110xxxx xxxxxxxx xxxxxxxx xxxxxxxx
     * 10000000-FFFFFFFF: 11110ZZZ xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *
     * (`Z' is like `x' but is always going to be zero since the
     * values I'm encoding don't go above 2^32. In principle the
     * five-byte form of the encoding could extend to 2^35, and
     * there could be six-, seven-, eight- and nine-byte forms as
     * well to allow up to 64-bit values to be encoded. But that's
     * completely unnecessary for these purposes!)
     *
     * The encoding as written above would be very simple, except
     * that 7-bit ASCII can occur in several different ways in the
     * terminal data; sometimes it crops up in the D800 page
     * (CSET_ASCII) but at other times it's in the 0000 page (real
     * Unicode). Therefore, this encoding is actually _stateful_:
     * the one-byte encoding of 00-7F actually indicates `reuse the
     * upper three bytes of the last character', and to encode an
     * absolute value of 00-7F you need to use the two-byte form
     * instead.
     */
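
    /*
     * Worked example (illustration only): an ASCII 'A' held in the
     * D800 page would be stored as chr = 0xD841.
     *
     *  - If *state already holds that page (the previous character
     *    was in it too), the cell goes out as the single byte 0x41.
     *  - If *state is 0 instead, 0xD841 is >= 0x4000, so it takes the
     *    three-byte form: 0xC0 0xD8 0x41.
     *  - Conversely, a real U+0041 emitted while *state holds 0xD800
     *    cannot use the one-byte form; it becomes the two-byte form
     *    0x80 0x41.
     */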
    if ((c->chr & ~0x7F) == *state) {
        put_byte(b, (unsigned char)(c->chr & 0x7F));
    } else if (c->chr < 0x4000) {
        put_byte(b, (unsigned char)(((c->chr >> 8) & 0x3F) | 0x80));
        put_byte(b, (unsigned char)(c->chr & 0xFF));
    } else if (c->chr < 0x200000) {
        put_byte(b, (unsigned char)(((c->chr >> 16) & 0x1F) | 0xC0));
        put_uint16(b, c->chr & 0xFFFF);
|
|
|
} else if (c->chr < 0x10000000) {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, (unsigned char)(((c->chr >> 24) & 0x0F) | 0xE0));
|
|
|
|
put_byte(b, (unsigned char)((c->chr >> 16) & 0xFF));
|
|
|
|
put_uint16(b, c->chr & 0xFFFF);
|
2004-10-13 11:50:16 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, 0xF0);
|
|
|
|
put_uint32(b, c->chr);
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
|
|
|
*state = c->chr & ~0xFF;
|
|
|
|
}
|
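For reference, a minimal sketch of a reader for the variable-length character encoding written above. It assumes that the branch elided before the `< 0x4000` case emits a single byte below 0x80 whenever the top 24 bits of the character match the running *state (which the final assignment above maintains), and that put_uint16/put_uint32 are big-endian; the function name and raw byte-pointer interface are illustrative, not the reader the real code uses.

static unsigned long readliteral_chr_sketch(const unsigned char **p,
                                            unsigned long *state)
{
    unsigned long chr;
    unsigned char first = *(*p)++;

    if (first < 0x80) {
        chr = *state | first;                       /* assumed 1-byte form */
    } else if (first < 0xC0) {
        chr = (unsigned long)(first & 0x3F) << 8;   /* 2-byte form, < 0x4000 */
        chr |= *(*p)++;
    } else if (first < 0xE0) {
        chr = (unsigned long)(first & 0x1F) << 16;  /* 3-byte form, < 0x200000 */
        chr |= (unsigned long)*(*p)++ << 8;
        chr |= *(*p)++;
    } else if (first < 0xF0) {
        chr = (unsigned long)(first & 0x0F) << 24;  /* 4-byte form, < 0x10000000 */
        chr |= (unsigned long)*(*p)++ << 16;
        chr |= (unsigned long)*(*p)++ << 8;
        chr |= *(*p)++;
    } else {
        chr = (unsigned long)*(*p)++ << 24;         /* 5-byte form, full 32 bits */
        chr |= (unsigned long)*(*p)++ << 16;
        chr |= (unsigned long)*(*p)++ << 8;
        chr |= *(*p)++;
    }

    *state = chr & ~0xFF;
    return chr;
}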
2018-12-01 10:25:46 +00:00
|
|
|
static void makeliteral_attr(strbuf *b, termchar *c, unsigned long *state)
|
2004-10-13 11:50:16 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* My encoding for attributes is 16-bit-granular and assumes
|
|
|
|
* that the top bit of the word is never required. I either
|
|
|
|
* store a two-byte value with the top bit clear (indicating
|
|
|
|
* just that value), or a four-byte value with the top bit set
|
|
|
|
* (indicating the same value with its top bit clear).
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2004-11-28 15:13:34 +00:00
|
|
|
* However, first I permute the bits of the attribute value, so
|
|
|
|
* that the eight bits of colour (four in each of fg and bg)
|
|
|
|
* which are never non-zero unless xterm 256-colour mode is in
|
|
|
|
* use are placed higher up the word than everything else. This
|
|
|
|
* ensures that attribute values remain 16-bit _unless_ the
|
|
|
|
* user uses extended colour.
|
2004-10-13 11:50:16 +00:00
|
|
|
*/
|
2004-11-28 15:13:34 +00:00
|
|
|
unsigned attr, colourbits;
|
|
|
|
|
|
|
|
attr = c->attr;
|
|
|
|
|
|
|
|
assert(ATTR_BGSHIFT > ATTR_FGSHIFT);
|
|
|
|
|
|
|
|
colourbits = (attr >> (ATTR_BGSHIFT + 4)) & 0xF;
|
|
|
|
colourbits <<= 4;
|
|
|
|
colourbits |= (attr >> (ATTR_FGSHIFT + 4)) & 0xF;
|
|
|
|
|
|
|
|
attr = (((attr >> (ATTR_BGSHIFT + 8)) << (ATTR_BGSHIFT + 4)) |
|
2019-09-08 19:29:00 +00:00
|
|
|
(attr & ((1 << (ATTR_BGSHIFT + 4))-1)));
|
2004-11-28 15:13:34 +00:00
|
|
|
attr = (((attr >> (ATTR_FGSHIFT + 8)) << (ATTR_FGSHIFT + 4)) |
|
2019-09-08 19:29:00 +00:00
|
|
|
(attr & ((1 << (ATTR_FGSHIFT + 4))-1)));
|
2004-11-28 15:13:34 +00:00
|
|
|
|
|
|
|
attr |= (colourbits << (32-9));
|
|
|
|
|
|
|
|
if (attr < 0x8000) {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, (unsigned char)((attr >> 8) & 0xFF));
|
|
|
|
put_byte(b, (unsigned char)(attr & 0xFF));
|
2004-10-13 11:50:16 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, (unsigned char)(((attr >> 24) & 0x7F) | 0x80));
|
|
|
|
put_byte(b, (unsigned char)((attr >> 16) & 0xFF));
|
|
|
|
put_byte(b, (unsigned char)((attr >> 8) & 0xFF));
|
|
|
|
put_byte(b, (unsigned char)(attr & 0xFF));
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
|
|
|
}
|
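A minimal sketch of undoing the transformation above, derived directly from the writer: strip the two- or four-byte framing, pull the stashed extended-colour nibbles back out of the top of the word, re-open the two 4-bit holes in the opposite order to that in which they were closed, and drop the nibbles back in. The function name and raw byte-pointer interface are illustrative; the real reader may differ in detail.

static unsigned long readliteral_attr_sketch(const unsigned char **p)
{
    unsigned long attr, colourbits;
    unsigned char first = *(*p)++;

    if (first & 0x80) {                       /* four-byte form */
        attr = (unsigned long)(first & 0x7F) << 24;
        attr |= (unsigned long)*(*p)++ << 16;
        attr |= (unsigned long)*(*p)++ << 8;
        attr |= *(*p)++;
    } else {                                  /* two-byte form */
        attr = (unsigned long)first << 8;
        attr |= *(*p)++;
    }

    colourbits = (attr >> (32-9)) & 0xFF;     /* recover the stashed nibbles */
    attr &= (1UL << (32-9)) - 1;

    /* Re-open the two 4-bit holes, in the reverse order they were closed */
    attr = (((attr >> (ATTR_FGSHIFT + 4)) << (ATTR_FGSHIFT + 8)) |
            (attr & ((1UL << (ATTR_FGSHIFT + 4))-1)));
    attr = (((attr >> (ATTR_BGSHIFT + 4)) << (ATTR_BGSHIFT + 8)) |
            (attr & ((1UL << (ATTR_BGSHIFT + 4))-1)));

    /* Put the extended-colour nibbles back into those holes */
    attr |= (colourbits & 0xF) << (ATTR_FGSHIFT + 4);
    attr |= (colourbits >> 4) << (ATTR_BGSHIFT + 4);

    return attr;
}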
2018-12-01 10:25:46 +00:00
|
|
|
static void makeliteral_truecolour(strbuf *b, termchar *c, unsigned long *state)
|
2017-09-30 16:32:32 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Put the used parts of the colour info into the buffer.
|
|
|
|
*/
|
2018-12-01 10:25:46 +00:00
|
|
|
put_byte(b, ((c->truecolour.fg.enabled ? 1 : 0) |
|
2022-08-03 19:48:46 +00:00
|
|
|
(c->truecolour.bg.enabled ? 2 : 0)));
|
2017-09-30 16:32:32 +00:00
|
|
|
if (c->truecolour.fg.enabled) {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, c->truecolour.fg.r);
|
|
|
|
put_byte(b, c->truecolour.fg.g);
|
|
|
|
put_byte(b, c->truecolour.fg.b);
|
2017-09-30 16:32:32 +00:00
|
|
|
}
|
|
|
|
if (c->truecolour.bg.enabled) {
|
2019-09-08 19:29:00 +00:00
|
|
|
put_byte(b, c->truecolour.bg.r);
|
|
|
|
put_byte(b, c->truecolour.bg.g);
|
|
|
|
put_byte(b, c->truecolour.bg.b);
|
2017-09-30 16:32:32 +00:00
|
|
|
}
|
|
|
|
}
|
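The corresponding read-back is straightforward; a minimal sketch, reusing the truecolour/optionalrgb field names visible in the writer (the function itself and the byte-pointer interface are illustrative):

static truecolour readliteral_truecolour_sketch(const unsigned char **p)
{
    truecolour tc;
    unsigned char flags = *(*p)++;   /* bit 0: fg present, bit 1: bg present */

    tc.fg.enabled = (flags & 1) != 0;
    tc.bg.enabled = (flags & 2) != 0;

    if (tc.fg.enabled) {
        tc.fg.r = *(*p)++;
        tc.fg.g = *(*p)++;
        tc.fg.b = *(*p)++;
    } else {
        tc.fg.r = tc.fg.g = tc.fg.b = 0;
    }
    if (tc.bg.enabled) {
        tc.bg.r = *(*p)++;
        tc.bg.g = *(*p)++;
        tc.bg.b = *(*p)++;
    } else {
        tc.bg.r = tc.bg.g = tc.bg.b = 0;
    }
    return tc;
}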
2018-12-01 10:25:46 +00:00
|
|
|
static void makeliteral_cc(strbuf *b, termchar *c, unsigned long *state)
|
2004-10-14 16:42:43 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* For combining characters, I just encode a bunch of ordinary
|
|
|
|
* chars using makeliteral_chr, and terminate with a \0
|
|
|
|
* character (which I know won't come up as a combining char
|
|
|
|
* itself).
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2004-10-14 16:42:43 +00:00
|
|
|
* I don't use the stateful encoding in makeliteral_chr.
|
|
|
|
*/
|
|
|
|
unsigned long zstate;
|
|
|
|
termchar z;
|
|
|
|
|
|
|
|
while (c->cc_next) {
|
2019-09-08 19:29:00 +00:00
|
|
|
c += c->cc_next;
|
2004-10-14 16:42:43 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
assert(c->chr != 0);
|
2004-10-14 16:42:43 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
zstate = 0;
|
|
|
|
makeliteral_chr(b, c, &zstate);
|
2004-10-14 16:42:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
z.chr = 0;
|
|
|
|
zstate = 0;
|
|
|
|
makeliteral_chr(b, &z, &zstate);
|
|
|
|
}
|
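Reading that back is equally simple; a minimal sketch, in which add_cc() stands for whatever helper appends a combining character to a given cell (its name and signature are assumptions here), and readliteral_chr_sketch is the illustrative character reader sketched earlier:

static void readliteral_cc_sketch(const unsigned char **p,
                                  termline *ldata, int col)
{
    for (;;) {
        unsigned long zstate = 0;   /* fresh state each time, as in the writer */
        unsigned long chr = readliteral_chr_sketch(p, &zstate);
        if (chr == 0)
            break;                  /* the terminating \0 written above */
        add_cc(ldata, col, chr);    /* assumed helper: attach combining char */
    }
}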
2004-10-13 11:50:16 +00:00
|
|
|
|
2018-12-01 10:25:46 +00:00
|
|
|
typedef struct compressed_scrollback_line {
|
|
|
|
size_t len;
|
Build option to disable scrollback compression.
This was requested by a downstream of the code, who wanted to change
the time/space tradeoff in the terminal. I currently have no plans to
change this setting for upstream PuTTY, although there is a cmake
option for it just to make testing it easy.
To avoid sprinkling ifdefs over the whole terminal code, the strategy
is to keep the separate type 'compressed_scrollback_line', and turn it
into a typedef for a 'termline *'. So compressline() becomes almost
trivial, and decompressline() even more so.
Memory management is the fiddly part. To make this work sensibly on
both sides, I've broken up each of compressline() and decompressline()
into two versions, one of which takes ownership of (and logically
speaking frees) its input, and the other doesn't. So at call sites
where a function was followed by a free, it's now calling the
'and_free' version of the function, and where the input object was
reused afterwards, it's calling the 'no_free' version. This means that
in different branches of the #if, I can make one function call the
other or vice versa, and no call site is stuck with having to do
things in a more roundabout way than necessary.
The freeing of the _return_ value from decompressline() is handled for
us, because termlines already have a 'temporary' flag which is set
when they're returned from the decompressor, and anyone receiving a
termline from lineptr() calls unlineptr() when they're finished with
it, which will _conditionally_ free it, depending on that 'temporary'
flag. So in the new mode, 'temporary' is never set at all, and all
those unlineptr() calls do nothing.
However, we also still need to free compressed lines properly when
they're actually being thrown away (scrolled off the top of the
scrollback, or cleaned up in term_free), and for that, I've made a new
special-purpose free_compressed_line() function.
2022-11-20 10:55:33 +00:00
|
|
|
/* compressed data follows after this */
|
2018-12-01 10:25:46 +00:00
|
|
|
} compressed_scrollback_line;
|
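Since the compressed byte stream is stored in the same allocation immediately after this header (compressline_no_free below reserves sizeof(compressed_scrollback_line) bytes at the front of its strbuf for exactly this reason), the payload can be addressed by stepping past the struct; a minimal sketch (helper name illustrative):

static const unsigned char *compressed_data_sketch(
    const compressed_scrollback_line *line)
{
    return (const unsigned char *)(line + 1);   /* line->len bytes of RLE data */
}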
2004-10-13 11:50:16 +00:00
|
|
|
|
2022-11-20 10:55:33 +00:00
|
|
|
static termline *decompressline_no_free(compressed_scrollback_line *line);
|
2018-12-01 10:25:46 +00:00
|
|
|
|
2022-11-20 10:55:33 +00:00
|
|
|
static compressed_scrollback_line *compressline_no_free(termline *ldata)
|
2004-10-13 11:50:16 +00:00
|
|
|
{
|
2018-12-01 10:25:46 +00:00
|
|
|
strbuf *b = strbuf_new();
|
|
|
|
|
|
|
|
/* Leave space for the header structure */
|
|
|
|
strbuf_append(b, sizeof(compressed_scrollback_line));
|
2004-10-13 11:50:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* First, store the column count, 7 bits at a time, least
|
|
|
|
* significant `digit' first, with the high bit set on all but
|
|
|
|
* the last.
|
|
|
|
*/
|
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
int n = ldata->cols;
|
|
|
|
while (n >= 128) {
|
|
|
|
put_byte(b, (unsigned char)((n & 0x7F) | 0x80));
|
|
|
|
n >>= 7;
|
|
|
|
}
|
|
|
|
put_byte(b, (unsigned char)(n));
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
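The matching reader for this 7-bits-per-byte count is a short loop; a minimal sketch (function name and byte-pointer interface illustrative), which also serves for the lattr value stored next, since that uses the same scheme:

static unsigned long read_7bit_count_sketch(const unsigned char **p)
{
    unsigned long n = 0;
    int shift = 0;
    unsigned char byte;

    do {
        byte = *(*p)++;
        n |= (unsigned long)(byte & 0x7F) << shift;   /* least significant group first */
        shift += 7;
    } while (byte & 0x80);                            /* high bit set on all but the last */

    return n;
}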
|
|
|
|
|
|
|
/*
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
|
|
|
* Next store the lattrs; same principle. We add one extra bit to
|
|
|
|
* this to indicate the trust state of the line.
|
2004-10-13 11:50:16 +00:00
|
|
|
*/
|
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
int n = ldata->lattr | (ldata->trusted ? 0x10000 : 0);
|
|
|
|
while (n >= 128) {
|
|
|
|
put_byte(b, (unsigned char)((n & 0x7F) | 0x80));
|
|
|
|
n >>= 7;
|
|
|
|
}
|
|
|
|
put_byte(b, (unsigned char)(n));
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
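Having decoded that value with the same 7-bit scheme, the reader just has to split it back apart; a minimal sketch, assuming (as the 0x10000 trust bit above implies) that all real lattr flags live below bit 16:

static void split_lattr_sketch(unsigned long n, unsigned *lattr, bool *trusted)
{
    *trusted = (n & 0x10000) != 0;   /* the extra trust bit added above */
    *lattr = n & 0xFFFF;             /* assumption: lattr flags fit in 16 bits */
}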
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we store a sequence of separate run-length encoded
|
|
|
|
* fragments, each containing exactly as many symbols as there
|
|
|
|
* are columns in the ldata.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2004-10-13 11:50:16 +00:00
|
|
|
* All of these have a common basic format:
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2004-10-13 11:50:16 +00:00
|
|
|
* - a byte 00-7F indicates that X+1 literals follow it
|
2019-09-08 19:29:00 +00:00
|
|
|
* - a byte 80-FF indicates that a single literal follows it
|
|
|
|
* and expects to be repeated (X-0x80)+2 times.
|
|
|
|
*
|
2004-10-13 11:50:16 +00:00
|
|
|
* The format of the `literals' varies between the fragments.
|
|
|
|
*/
|
|
|
|
makerle(b, ldata, makeliteral_chr);
|
|
|
|
makerle(b, ldata, makeliteral_attr);
|
2017-09-30 16:32:32 +00:00
|
|
|
makerle(b, ldata, makeliteral_truecolour);
|
2004-10-14 16:42:43 +00:00
|
|
|
makerle(b, ldata, makeliteral_cc);
|
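A minimal sketch of the generic run-length reader these four passes imply: it keeps pulling header bytes until it has produced exactly ldata->cols cells, calling a per-fragment decode routine for each literal, and for a repeated run it re-decodes the same literal into each destination cell so that only the current fragment's field of the termchar is touched. The callback shape and byte-pointer interface are illustrative (the chr/attr/truecolour/cc sketches above would need trivial adapters to fit it).

typedef void (*readliteral_fn_sketch)(const unsigned char **p, termchar *c,
                                      termline *ldata, unsigned long *state);

static void readrle_sketch(const unsigned char **p, termline *ldata,
                           readliteral_fn_sketch readliteral)
{
    int n = 0;
    unsigned long state = 0;    /* one running state per fragment, as when writing */

    while (n < ldata->cols) {
        unsigned char hdr = *(*p)++;

        if (hdr >= 0x80) {
            /* One literal, repeated (hdr - 0x80) + 2 times. */
            int count = (hdr - 0x80) + 2;
            const unsigned char *run = *p;
            while (count-- > 0 && n < ldata->cols) {
                *p = run;       /* re-decode the same literal each time */
                readliteral(p, &ldata->chars[n++], ldata, &state);
            }
        } else {
            /* hdr + 1 distinct literals follow. */
            int count = hdr + 1;
            while (count-- > 0 && n < ldata->cols)
                readliteral(p, &ldata->chars[n++], ldata, &state);
        }
    }
}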
2004-10-13 11:50:16 +00:00
|
|
|
|
2020-10-27 18:15:12 +00:00
|
|
|
size_t linelen = b->len - sizeof(compressed_scrollback_line);
|
|
|
|
compressed_scrollback_line *line =
|
|
|
|
(compressed_scrollback_line *)strbuf_to_str(b);
|
|
|
|
line->len = linelen;
|
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
/*
|
|
|
|
* Diagnostics: ensure that the compressed data really does
|
|
|
|
* decompress to the right thing.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2005-04-01 13:25:13 +00:00
|
|
|
* This is a bit performance-heavy for production code.
|
2004-10-13 11:50:16 +00:00
|
|
|
*/
|
2005-04-01 13:25:13 +00:00
|
|
|
#ifdef TERM_CC_DIAGS
|
2004-10-13 11:50:16 +00:00
|
|
|
#ifndef CHECK_SB_COMPRESSION
|
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
termline *dcl;
|
|
|
|
int i;
|
2004-10-13 11:50:16 +00:00
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC_SB_COMPRESSION
|
2019-09-08 19:29:00 +00:00
|
|
|
for (i = 0; i < (int)line->len; i++) {   /* dump the compressed payload; b is gone after strbuf_to_str */
|
|
|
|
printf(" %02x ", b->data[i]);
|
|
|
|
}
|
|
|
|
printf("\n");
|
2004-10-13 11:50:16 +00:00
|
|
|
#endif
|
|
|
|
|
2022-11-20 10:55:33 +00:00
|
|
|
        dcl = decompressline_no_free(line);
        assert(ldata->cols == dcl->cols);
        assert(ldata->lattr == dcl->lattr);
        for (i = 0; i < ldata->cols; i++)
            assert(termchars_equal(&ldata->chars[i], &dcl->chars[i]));

#ifdef DIAGNOSTIC_SB_COMPRESSION
        printf("%d cols (%d bytes) -> %d bytes (factor of %g)\n",
               ldata->cols, 4 * ldata->cols, dused,
               (double)dused / (4 * ldata->cols));
#endif

        freetermline(dcl);
    }
#endif
#endif /* TERM_CC_DIAGS */

    return line;
}
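
/*
 * Convenience wrapper: compress a termline and take ownership of the
 * input, freeing it once the compressed copy has been made.
 */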
static compressed_scrollback_line *compressline_and_free(termline *ldata)
{
    compressed_scrollback_line *cline = compressline_no_free(ldata);
    freetermline(ldata);
    return cline;
}
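
/*
 * Decode one RLE stream back into the cells of a termline, calling the
 * given 'readliteral' function once per cell. Each chunk begins with a
 * header byte: a value of 0x80 or above introduces a run, in which the
 * single literal that follows is re-read (by rewinding bs->pos) for
 * hdr + 2 - 0x80 cells; anything below 0x80 introduces hdr + 1
 * consecutive distinct literals. For example, a header byte of 0x83
 * expands the next literal into 5 identical cells, whereas 0x03 is
 * followed by 4 separate literals.
 */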
static void readrle(BinarySource *bs, termline *ldata,
                    void (*readliteral)(BinarySource *bs, termchar *c,
                                        termline *ldata, unsigned long *state))
{
    int n = 0;
    unsigned long state = 0;

    while (n < ldata->cols) {
        int hdr = get_byte(bs);

        if (hdr >= 0x80) {
            /* A run. */

            size_t pos = bs->pos, count = hdr + 2 - 0x80;
            while (count--) {
                assert(n < ldata->cols);
                bs->pos = pos;
                readliteral(bs, ldata->chars + n, ldata, &state);
                n++;
            }
        } else {
            /* Just a sequence of consecutive literals. */

            int count = hdr + 1;
            while (count--) {
                assert(n < ldata->cols);
                readliteral(bs, ldata->chars + n, ldata, &state);
                n++;
            }
        }
    }

    assert(n == ldata->cols);
}
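
/*
 * Read back one cell's Unicode value, in the variable-length byte
 * encoding tabulated inside the function. '*state' carries everything
 * above the bottom byte of the previous character, so a character that
 * shares those upper bytes with its predecessor, and whose bottom byte
 * is below 0x80, decodes from a single byte.
 */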
static void readliteral_chr(BinarySource *bs, termchar *c, termline *ldata,
                            unsigned long *state)
{
    int byte;

    /*
     * 00000000-0000007F: 0xxxxxxx
     * 00000080-00003FFF: 10xxxxxx xxxxxxxx
     * 00004000-001FFFFF: 110xxxxx xxxxxxxx xxxxxxxx
     * 00200000-0FFFFFFF: 1110xxxx xxxxxxxx xxxxxxxx xxxxxxxx
     * 10000000-FFFFFFFF: 11110ZZZ xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     */
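    /*
     * (Worked example of the decoding below: a lone byte 0x41 yields
     * U+0041 when *state is zero, while the two bytes 0xA5 0x00 yield
     * U+2500, since (0xA5 &~ 0xC0) << 8 is 0x2500 and the following
     * 0x00 is ORed into the bottom byte.)
     */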

    byte = get_byte(bs);
    if (byte < 0x80) {
        c->chr = byte | *state;
    } else if (byte < 0xC0) {
        c->chr = (byte &~ 0xC0) << 8;
        c->chr |= get_byte(bs);
    } else if (byte < 0xE0) {
        c->chr = (byte &~ 0xE0) << 16;
        c->chr |= get_uint16(bs);
    } else if (byte < 0xF0) {
        c->chr = (byte &~ 0xF0) << 24;
        c->chr |= get_byte(bs) << 16;
        c->chr |= get_uint16(bs);
    } else {
        assert(byte == 0xF0);
        c->chr = get_uint32(bs);
    }
    *state = c->chr & ~0xFF;
}
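
/*
 * Read back one cell's attribute word. The common case is a single
 * 16-bit value; if its top bit is set, that bit is stripped and a
 * second 16-bit word is pulled in to form a full 32-bit value.
 */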
static void readliteral_attr(BinarySource *bs, termchar *c, termline *ldata,
                             unsigned long *state)
{
    unsigned val, attr, colourbits;

    val = get_uint16(bs);

    if (val >= 0x8000) {
        val &= ~0x8000;
        val <<= 16;
        val |= get_uint16(bs);
    }
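
    /*
     * The stored word evidently keeps only the low 4 bits of each of the
     * foreground and background colour fields in place; the remaining 4
     * bits of each are packed together into the byte extracted here as
     * 'colourbits', and are shifted back into position below to rebuild
     * the full fields above ATTR_FGSHIFT and ATTR_BGSHIFT.
     */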
    colourbits = (val >> (32-9)) & 0xFF;
    attr = (val & ((1<<(32-9))-1));

    attr = (((attr >> (ATTR_FGSHIFT + 4)) << (ATTR_FGSHIFT + 8)) |
            (attr & ((1 << (ATTR_FGSHIFT + 4))-1)));
    attr = (((attr >> (ATTR_BGSHIFT + 4)) << (ATTR_BGSHIFT + 8)) |
            (attr & ((1 << (ATTR_BGSHIFT + 4))-1)));

    attr |= (colourbits >> 4) << (ATTR_BGSHIFT + 4);
    attr |= (colourbits & 0xF) << (ATTR_FGSHIFT + 4);

    c->attr = attr;
}
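
/*
 * Read back a cell's optional true-colour data: a flags byte in which
 * bit 0 says whether an explicit foreground RGB follows and bit 1 says
 * the same for the background, each as three bytes r, g, b. Absent
 * entries are reset to optionalrgb_none.
 */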
static void readliteral_truecolour(
    BinarySource *bs, termchar *c, termline *ldata, unsigned long *state)
{
    int flags = get_byte(bs);

    if (flags & 1) {
        c->truecolour.fg.enabled = true;
        c->truecolour.fg.r = get_byte(bs);
        c->truecolour.fg.g = get_byte(bs);
        c->truecolour.fg.b = get_byte(bs);
    } else {
        c->truecolour.fg = optionalrgb_none;
    }

    if (flags & 2) {
        c->truecolour.bg.enabled = true;
        c->truecolour.bg.r = get_byte(bs);
        c->truecolour.bg.g = get_byte(bs);
        c->truecolour.bg.b = get_byte(bs);
    } else {
        c->truecolour.bg = optionalrgb_none;
    }
}
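
/*
 * Read back the chain of combining characters attached to a cell. Each
 * entry is encoded with the same scheme as readliteral_chr (using a
 * private, zeroed state), the chain is terminated by a zero character,
 * and each entry read is re-attached to the cell with add_cc().
 */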
static void readliteral_cc(BinarySource *bs, termchar *c, termline *ldata,
                           unsigned long *state)
{
    termchar n;
    unsigned long zstate;
    int x = c - ldata->chars;

    c->cc_next = 0;

    while (1) {
        zstate = 0;
        readliteral_chr(bs, &n, ldata, &zstate);
        if (!n.chr)
            break;
        add_cc(ldata, x, n.chr);
    }
}
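
/*
 * Expand a compressed scrollback line back into a freshly allocated
 * termline. The compressed data is read from just past the header
 * structure (line+1, for line->len bytes): first the column count,
 * later the line attribute word, and then - presumably via the
 * readrle()/readliteral_*() decoders above - the per-cell data. The
 * result is flagged 'temporary', so that unlineptr() knows to free it.
 */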
static termline *decompressline_no_free(compressed_scrollback_line *line)
{
    int ncols, byte, shift;
    BinarySource bs[1];
    termline *ldata;

    BinarySource_BARE_INIT(bs, line+1, line->len);

    /*
     * First read in the column count.
     */
    ncols = shift = 0;
    do {
        byte = get_byte(bs);
        ncols |= (byte & 0x7F) << shift;
        shift += 7;
    } while (byte & 0x80);
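
    /*
     * (Both the column count above and the lattr below are decoded as
     * little-endian base-128 varints: seven payload bits per byte, with
     * the top bit set on every byte except the last. For example, the
     * single byte 0x50 decodes to 80 columns, while the two bytes
     * 0xC8 0x01 decode to 200.)
     */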
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now create the output termline.
|
|
|
|
*/
|
|
|
|
ldata = snew(termline);
|
|
|
|
ldata->chars = snewn(ncols, termchar);
|
2004-10-14 16:42:43 +00:00
|
|
|
ldata->cols = ldata->size = ncols;
|
2018-10-29 19:50:29 +00:00
|
|
|
ldata->temporary = true;
|
2004-10-14 16:42:43 +00:00
|
|
|
ldata->cc_free = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We must set all the cc pointers in ldata->chars to 0 right
|
|
|
|
* now, so that cc diagnostics that verify the integrity of the
|
|
|
|
* whole line will make sense while we're in the middle of
|
|
|
|
* building it up.
|
|
|
|
*/
|
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
int i;
|
|
|
|
for (i = 0; i < ldata->cols; i++)
|
|
|
|
ldata->chars[i].cc_next = 0;
|
2004-10-14 16:42:43 +00:00
|
|
|
}
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now read in the lattr.
|
|
|
|
*/
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
|
|
|
int lattr = shift = 0;
|
2004-10-13 11:50:16 +00:00
|
|
|
do {
|
2019-09-08 19:29:00 +00:00
|
|
|
byte = get_byte(bs);
|
|
|
|
lattr |= (byte & 0x7F) << shift;
|
|
|
|
shift += 7;
|
2004-10-13 11:50:16 +00:00
|
|
|
} while (byte & 0x80);
|
2019-03-10 14:39:28 +00:00
|
|
|
ldata->lattr = lattr & 0xFFFF;
|
|
|
|
ldata->trusted = (lattr & 0x10000) != 0;
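The loop above decodes a base-128 varint: seven payload bits per byte, least
significant group first, with the top bit set on every byte except the last.
For reference, a minimal sketch of the matching encoder, shown out of line
purely for illustration (the real encoding is done in compressline(); the
helper name is invented, strbuf/put_byte are PuTTY's marshalling primitives):
static void put_uint_varint(strbuf *buf, unsigned val)
{
    while (val >= 0x80) {
        put_byte(buf, (val & 0x7F) | 0x80);   /* continuation bit set */
        val >>= 7;
    }
    put_byte(buf, val);                       /* final byte, top bit clear */
}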
|
2004-10-13 11:50:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we read in each of the RLE streams in turn.
|
|
|
|
*/
|
2018-12-01 10:25:46 +00:00
|
|
|
readrle(bs, ldata, readliteral_chr);
|
|
|
|
readrle(bs, ldata, readliteral_attr);
|
|
|
|
readrle(bs, ldata, readliteral_truecolour);
|
|
|
|
readrle(bs, ldata, readliteral_cc);
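/* Roughly speaking, each of these streams is a sequence of records: either a
 * literal block of distinct cells, each decoded by the readliteral_* callback
 * passed in here, or a run, stored as a count plus a single cell to repeat -
 * hence the 'mostly RLE-like' description of the scheme. The exact record
 * framing is private to readrle() and its encoding counterpart. */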
|
2004-10-13 11:50:16 +00:00
|
|
|
|
2018-12-01 10:25:46 +00:00
|
|
|
/* And we always expect that we ended up exactly at the end of the
|
|
|
|
* compressed data. */
|
|
|
|
assert(!get_err(bs));
|
|
|
|
assert(get_avail(bs) == 0);
|
2004-10-13 11:50:16 +00:00
|
|
|
|
|
|
|
return ldata;
|
|
|
|
}
|
|
|
|
|
Build option to disable scrollback compression.
This was requested by a downstream of the code, who wanted to change
the time/space tradeoff in the terminal. I currently have no plans to
change this setting for upstream PuTTY, although there is a cmake
option for it just to make testing it easy.
To avoid sprinkling ifdefs over the whole terminal code, the strategy
is to keep the separate type 'compressed_scrollback_line', and turn it
into a typedef for a 'termline *'. So compressline() becomes almost
trivial, and decompressline() even more so.
Memory management is the fiddly part. To make this work sensibly on
both sides, I've broken up each of compressline() and decompressline()
into two versions, one of which takes ownership of (and logically
speaking frees) its input, and the other doesn't. So at call sites
where a function was followed by a free, it's now calling the
'and_free' version of the function, and where the input object was
reused afterwards, it's calling the 'no_free' version. This means that
in different branches of the #if, I can make one function call the
other or vice versa, and no call site is stuck with having to do
things in a more roundabout way than necessary.
The freeing of the _return_ value from decompressline() is handled for
us, because termlines already have a 'temporary' flag which is set
when they're returned from the decompressor, and anyone receiving a
termline from lineptr() calls unlineptr() when they're finished with
it, which will _conditionally_ free it, depending on that 'temporary'
flag. So in the new mode, 'temporary' is never set at all, and all
those unlineptr() calls do nothing.
However, we also still need to free compressed lines properly when
they're actually being thrown away (scrolled off the top of the
scrollback, or cleaned up in term_free), and for that, I've made a new
special-purpose free_compressed_line() function.
2022-11-20 10:55:33 +00:00
|
|
|
static inline void free_compressed_line(compressed_scrollback_line *cline)
|
|
|
|
{
|
|
|
|
sfree(cline);
|
|
|
|
}
|
|
|
|
|
|
|
|
static termline *decompressline_and_free(compressed_scrollback_line *cline)
|
|
|
|
{
|
|
|
|
termline *ldata = decompressline_no_free(cline);
|
|
|
|
free_compressed_line(cline);
|
|
|
|
return ldata;
|
|
|
|
}
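As a usage illustration only (a hypothetical call site, not code from this
file): a caller that is permanently removing a line from the scrollback can
hand ownership to the decompressor and skip the separate free, which is
exactly the pattern the '_and_free' variants exist for.
    compressed_scrollback_line *cline = delpos234(term->scrollback, 0);
    termline *line = decompressline_and_free(cline);  /* cline is freed for us */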
|
|
|
|
|
|
|
|
#else /* NO_SCROLLBACK_COMPRESSION */
|
|
|
|
|
|
|
|
static termline *duptermline(termline *oldline)
|
|
|
|
{
|
|
|
|
termline *newline = snew(termline);
|
|
|
|
*newline = *oldline; /* copy the POD structure fields */
|
|
|
|
newline->chars = snewn(newline->size, termchar);
|
|
|
|
for (int j = 0; j < newline->size; j++)
|
|
|
|
newline->chars[j] = oldline->chars[j];
|
|
|
|
return newline;
|
|
|
|
}
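/* Note on duptermline() above: the chars array is copied over its whole
 * 'size' (the columns plus the combining-character space after them), and
 * since cc_next values are relative offsets and cc_free is an index into the
 * same array, the duplicate's cc chains come out valid with no fixup. */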
|
|
|
|
|
|
|
|
typedef termline compressed_scrollback_line;
|
|
|
|
|
|
|
|
static inline compressed_scrollback_line *compressline_and_free(
|
|
|
|
termline *ldata)
|
|
|
|
{
|
|
|
|
return ldata;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline compressed_scrollback_line *compressline_no_free(termline *ldata)
|
|
|
|
{
|
|
|
|
return duptermline(ldata);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline termline *decompressline_no_free(
|
|
|
|
compressed_scrollback_line *line)
|
|
|
|
{
|
|
|
|
/* This will return a line without the 'temporary' flag, which
|
|
|
|
* means that unlineptr() is already set up to avoid freeing it */
|
|
|
|
return line;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline termline *decompressline_and_free(
|
|
|
|
compressed_scrollback_line *line)
|
|
|
|
{
|
|
|
|
/* Same as decompressline_no_free, because the caller will free
|
|
|
|
* our returned termline, and that does all the freeing necessary */
|
|
|
|
return line;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void free_compressed_line(compressed_scrollback_line *line)
|
|
|
|
{
|
|
|
|
freetermline(line);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* NO_SCROLLBACK_COMPRESSION */
|
|
|
|
|
2001-05-17 08:53:13 +00:00
|
|
|
/*
|
|
|
|
* Resize a line to make it `cols' columns wide.
|
|
|
|
*/
|
2004-10-13 11:50:16 +00:00
|
|
|
static void resizeline(Terminal *term, termline *line, int cols)
|
2001-05-17 08:53:13 +00:00
|
|
|
{
|
2004-10-15 10:48:27 +00:00
|
|
|
int i, oldcols;
|
2001-05-17 08:53:13 +00:00
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
if (line->cols != cols) {
|
2004-10-15 10:48:27 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
oldcols = line->cols;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This line is the wrong length, which probably means it
|
|
|
|
* hasn't been accessed since a resize. Resize it now.
|
|
|
|
*
|
|
|
|
* First, go through all the characters that will be thrown
|
|
|
|
* out in the resize (if we're shrinking the line) and
|
|
|
|
* return their cc lists to the cc free list.
|
|
|
|
*/
|
|
|
|
for (i = cols; i < oldcols; i++)
|
|
|
|
clear_cc(line, i);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're shrinking the line, we now bodily move the
|
|
|
|
* entire cc section from where it started to where it now
|
|
|
|
* needs to be. (We have to do this before the resize, so
|
|
|
|
* that the data we're copying is still there. However, if
|
|
|
|
* we're expanding, we have to wait until _after_ the
|
|
|
|
* resize so that the space we're copying into is there.)
|
|
|
|
*/
|
|
|
|
if (cols < oldcols)
|
|
|
|
memmove(line->chars + cols, line->chars + oldcols,
|
|
|
|
(line->size - line->cols) * TSIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now do the actual resize, leaving the _same_ amount of
|
|
|
|
* cc space as there was to begin with.
|
|
|
|
*/
|
|
|
|
line->size += cols - oldcols;
|
|
|
|
line->chars = sresize(line->chars, line->size, TTYPE);
|
|
|
|
line->cols = cols;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're expanding the line, _now_ we move the cc
|
|
|
|
* section.
|
|
|
|
*/
|
|
|
|
if (cols > oldcols)
|
|
|
|
memmove(line->chars + cols, line->chars + oldcols,
|
|
|
|
(line->size - line->cols) * TSIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Go through what's left of the original line, and adjust
|
|
|
|
* the first cc_next pointer in each list. (All the
|
|
|
|
* subsequent ones are still valid because they are
|
|
|
|
* relative offsets within the cc block.) Also do the same
|
|
|
|
* to the head of the cc_free list.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < oldcols && i < cols; i++)
|
|
|
|
if (line->chars[i].cc_next)
|
|
|
|
line->chars[i].cc_next += cols - oldcols;
|
|
|
|
if (line->cc_free)
|
|
|
|
line->cc_free += cols - oldcols;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* And finally fill in the new space with erase chars. (We
|
|
|
|
* don't have to worry about cc lists here, because we
|
|
|
|
* _know_ the erase char doesn't have one.)
|
|
|
|
*/
|
|
|
|
for (i = oldcols; i < cols; i++)
|
|
|
|
line->chars[i] = term->basic_erase_char;
|
2004-10-14 16:42:43 +00:00
|
|
|
|
2005-04-01 13:25:13 +00:00
|
|
|
#ifdef TERM_CC_DIAGS
|
2019-09-08 19:29:00 +00:00
|
|
|
cc_check(line);
|
2005-04-01 13:25:13 +00:00
|
|
|
#endif
|
2001-05-17 08:53:13 +00:00
|
|
|
}
|
|
|
|
}
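/* Worked example of the arithmetic above (illustrative numbers): suppose a
 * line has cols=100 and size=120, so chars[100..119] hold combining-char
 * cells, and we shrink to cols=80. clear_cc() releases any cc lists owned by
 * columns 80..99; the memmove copies the 20 cc cells down from index 100 to
 * index 80; size becomes 100 and cols becomes 80; and every live cc_next
 * head and the cc_free head are shifted by cols-oldcols = -20 so they still
 * point into the moved cc block. Growing a line runs the same steps, except
 * that the memmove happens after the realloc instead of before it. */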
|
|
|
|
|
2003-03-06 12:51:12 +00:00
|
|
|
/*
|
|
|
|
* Get the number of lines in the scrollback.
|
|
|
|
*/
|
|
|
|
static int sblines(Terminal *term)
|
|
|
|
{
|
|
|
|
int sblines = count234(term->scrollback);
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
|
|
|
if (term->erase_to_scrollback &&
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_which && term->alt_screen) {
|
2022-08-03 19:48:46 +00:00
|
|
|
sblines += term->alt_sblines;
|
2003-03-06 12:51:12 +00:00
|
|
|
}
|
|
|
|
return sblines;
|
|
|
|
}
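For readers unfamiliar with the Conf API mentioned in the commit note above,
a small illustrative use of it (the functions and keys are from conf.h and
putty.h; the particular values are made up):
    Conf *conf = conf_new();
    conf_set_str(conf, CONF_host, "example.org");
    conf_set_int(conf, CONF_width, 80);
    Conf *dup = conf_copy(conf);            /* deep copy, e.g. Duplicate Session */
    const char *host = conf_get_str(dup, CONF_host);   /* owned by 'dup' */
    conf_free(dup);
    conf_free(conf);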
|
|
|
|
|
2018-04-03 17:50:12 +00:00
|
|
|
static void null_line_error(Terminal *term, int y, int lineno,
|
|
|
|
tree234 *whichtree, int treeindex,
|
|
|
|
const char *varname)
|
|
|
|
{
|
|
|
|
modalfatalbox("%s==NULL in terminal.c\n"
|
|
|
|
"lineno=%d y=%d w=%d h=%d\n"
|
|
|
|
"count(scrollback=%p)=%d\n"
|
|
|
|
"count(screen=%p)=%d\n"
|
|
|
|
"count(alt=%p)=%d alt_sblines=%d\n"
|
|
|
|
"whichtree=%p treeindex=%d\n"
|
|
|
|
"commitid=%s\n\n"
|
|
|
|
"Please contact <putty@projects.tartarus.org> "
|
|
|
|
"and pass on the above information.",
|
|
|
|
varname, lineno, y, term->cols, term->rows,
|
|
|
|
term->scrollback, count234(term->scrollback),
|
|
|
|
term->screen, count234(term->screen),
|
|
|
|
term->alt_screen, count234(term->alt_screen),
|
|
|
|
term->alt_sblines, whichtree, treeindex, commitid);
|
|
|
|
}
|
|
|
|
|
2023-03-04 17:04:07 +00:00
|
|
|
static inline int checkscr(int y, int lineno)
|
|
|
|
{
|
|
|
|
if (y < 0)
|
|
|
|
modalfatalbox("screen line %d < 0 in terminal.c:%d", y, lineno);
|
|
|
|
return y;
|
|
|
|
}
|
|
|
|
|
2001-04-28 15:32:25 +00:00
|
|
|
/*
|
|
|
|
* Retrieve a line of the screen or of the scrollback, according to
|
|
|
|
* whether the y coordinate is non-negative or negative
|
|
|
|
* (respectively).
|
|
|
|
*/
|
2023-03-04 17:04:07 +00:00
|
|
|
static termline *lineptr(Terminal *term, int y, int lineno)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2004-10-13 11:50:16 +00:00
|
|
|
termline *line;
|
2001-04-28 15:32:25 +00:00
|
|
|
tree234 *whichtree;
|
2001-05-17 08:53:13 +00:00
|
|
|
int treeindex;
|
2001-04-28 15:32:25 +00:00
|
|
|
|
|
|
|
if (y >= 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
whichtree = term->screen;
|
|
|
|
treeindex = y;
|
2001-04-28 15:32:25 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
int altlines = 0;
|
|
|
|
|
|
|
|
if (term->erase_to_scrollback &&
|
|
|
|
term->alt_which && term->alt_screen) {
|
|
|
|
altlines = term->alt_sblines;
|
|
|
|
}
|
|
|
|
if (y < -altlines) {
|
|
|
|
whichtree = term->scrollback;
|
|
|
|
treeindex = y + altlines + count234(term->scrollback);
|
|
|
|
} else {
|
|
|
|
whichtree = term->alt_screen;
|
|
|
|
treeindex = y + term->alt_sblines;
|
|
|
|
/* treeindex = y + count234(term->alt_screen); */
|
|
|
|
}
|
2001-04-28 15:32:25 +00:00
|
|
|
}
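/* Concrete example of the y-coordinate mapping above (illustrative numbers):
 * with 1000 lines in term->scrollback and altlines == 3, y = 0 is the top
 * visible screen row; y = -1 .. -3 select the three viewable alt-screen
 * saved lines (treeindex 2 .. 0); and y = -4 selects the newest scrollback
 * line proper, treeindex -4 + 3 + 1000 = 999. */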
|
2004-10-13 11:50:16 +00:00
|
|
|
if (whichtree == term->scrollback) {
|
2019-09-08 19:29:00 +00:00
|
|
|
compressed_scrollback_line *cline = index234(whichtree, treeindex);
|
2018-04-03 17:50:12 +00:00
|
|
|
if (!cline)
|
|
|
|
null_line_error(term, y, lineno, whichtree, treeindex, "cline");
|
2022-11-20 10:55:33 +00:00
|
|
|
line = decompressline_no_free(cline);
|
2004-10-13 11:50:16 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
line = index234(whichtree, treeindex);
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
2001-04-28 15:32:25 +00:00
|
|
|
|
|
|
|
/* We assume that we don't screw up and retrieve something out of range. */
|
2018-04-03 17:50:12 +00:00
|
|
|
if (line == NULL)
|
|
|
|
null_line_error(term, y, lineno, whichtree, treeindex, "line");
|
2001-04-28 15:32:25 +00:00
|
|
|
assert(line != NULL);
|
|
|
|
|
2014-07-23 21:48:02 +00:00
|
|
|
/*
|
|
|
|
* Here we resize lines to _at least_ the right length, but we
|
|
|
|
* don't truncate them. Truncation is done as a side effect of
|
|
|
|
* modifying the line.
|
|
|
|
*
|
|
|
|
* The point of this policy is to try to arrange that resizing the
|
|
|
|
* terminal window repeatedly - e.g. successive steps in an X11
|
|
|
|
* opaque window-resize drag, or resizing as a side effect of
|
|
|
|
* retiling by tiling WMs such as xmonad - does not throw away
|
|
|
|
* data gratuitously. Specifically, we want a sequence of resize
|
|
|
|
* operations with no terminal output between them to have the
|
|
|
|
* same effect as a single resize to the ultimate terminal size,
|
|
|
|
* and also (for the case in which xmonad narrows a window that's
|
|
|
|
* scrolling things) we want scrolling up new text at the bottom
|
|
|
|
* of a narrowed window to avoid truncating lines further up when
|
|
|
|
* the window is re-widened.
|
|
|
|
*/
|
|
|
|
if (term->cols > line->cols)
|
|
|
|
resizeline(term, line, term->cols);
|
2001-04-28 15:32:25 +00:00
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
return line;
|
2001-04-28 15:32:25 +00:00
|
|
|
}
|
2001-05-06 14:35:20 +00:00
|
|
|
|
2023-03-04 17:04:07 +00:00
|
|
|
/*
|
|
|
|
* Macro wrappers for lineptr. The distinction between lineptr and
|
|
|
|
* scrlineptr is that lineptr can retrieve any line, from the screen
|
|
|
|
* _or_ from scrollback, and in return, you have to call unlineptr
|
|
|
|
* when you're done with it, in case it was a dynamically allocated
|
|
|
|
* line decompressed from scrollback that needs freeing. But
|
|
|
|
* scrlineptr will only retrieve lines from the active screen (and
|
|
|
|
* enforces this by an assertion), which means it's always just
|
|
|
|
* returning a pointer to an existing unpacked termline, and you don't
|
|
|
|
* have to call unlineptr afterwards. So drawing code (which might
|
|
|
|
* need the scrollback) will have to call lineptr/unlineptr, but
|
|
|
|
* update code during term_out can call scrlineptr.
|
|
|
|
*
|
|
|
|
* The 'assertion' in scrlineptr is done using a helper function that
|
|
|
|
* returns the input screen line number, which allows this macro to avoid
|
|
|
|
* double-evaluating its argument.
|
|
|
|
*/
|
|
|
|
#define lineptr(x) (lineptr)(term,x,__LINE__)
|
|
|
|
#define scrlineptr(x) (lineptr)(term,checkscr(x,__LINE__),__LINE__)
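/* Example of the double-evaluation hazard the checkscr() trick avoids: with
 * a naive assert-style macro such as (hypothetical)
 *     #define scrlineptr(x) (assert((x) >= 0), (lineptr)(term, (x), __LINE__))
 * a call like scrlineptr(y++) would increment y twice. Routing the argument
 * through checkscr() evaluates it exactly once and passes the value on. */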
|
2023-03-04 17:19:24 +00:00
|
|
|
#define unlineptr(line) term_release_line(line)
|
|
|
|
|
|
|
|
/* Wrapper for external use (e.g. tests), without the __LINE__ parameter */
|
|
|
|
termline *term_get_line(Terminal *term, int y) { return lineptr(y); }
|
2002-10-22 16:11:33 +00:00
|
|
|
|
2014-07-23 21:48:02 +00:00
|
|
|
/*
|
|
|
|
* Coerce a termline to the terminal's current width. Unlike the
|
|
|
|
* optional resize in lineptr() above, this is potentially destructive
|
|
|
|
* of text, since it can shrink as well as grow the line.
|
|
|
|
*
|
|
|
|
* We call this whenever a termline is actually going to be modified.
|
|
|
|
* Helpfully, putting a single call to this function in check_boundary
|
|
|
|
* deals with _nearly_ all such cases, leaving only a few things like
|
|
|
|
* bulk erase and ESC#8 to handle separately.
|
|
|
|
*/
|
|
|
|
static void check_line_size(Terminal *term, termline *line)
|
|
|
|
{
|
|
|
|
if (term->cols != line->cols) /* trivial optimisation */
|
|
|
|
resizeline(term, line, term->cols);
|
|
|
|
}
|
|
|
|
|
2004-11-27 13:20:21 +00:00
|
|
|
static void term_schedule_tblink(Terminal *term);
|
|
|
|
static void term_schedule_cblink(Terminal *term);
|
Apply UPDATE_DELAY in arrears, not in advance.
The original aim of the rate limit was to avoid having too many
updates per second. I implemented this by a deferment mechanism: when
any change occurs that makes the terminal want an update, it instead
sets a timer to go off after UPDATE_DELAY (1/50 second), and does the
update at the end of that interval.
Now it's done the other way round: if there has not been an update
within the last UPDATE_DELAY, then we can simply do an update _right
now_, in immediate response to whatever triggered it. And _then_ we
set a timer to track a cooldown period, within which any further
requests for updates will be deferred until the end of the cooldown.
This mechanism should still rate-limit updates, but now the latency in
normal interactive use should be lowered, because terminal updates in
response to keystrokes (which typically arrive separated by more than
UPDATE_DELAY) can now each be enacted as soon as possible after the
triggering keystroke.
This also reverses (in the common case) the slowdown of non-textual
window modifications introduced by the previous commit, in which lots
of them were brought under the umbrella of term_update and therefore
became subject to UPDATE_DELAY. Now they'll only be delayed in
conditions of high traffic, and not in interactive use.
2021-02-07 19:59:21 +00:00
|
|
|
static void term_update_callback(void *ctx);
|
2004-11-27 13:20:21 +00:00
|
|
|
|
2012-09-18 21:42:48 +00:00
|
|
|
static void term_timer(void *ctx, unsigned long now)
|
2004-11-27 13:20:21 +00:00
|
|
|
{
|
|
|
|
Terminal *term = (Terminal *)ctx;
|
|
|
|
|
2012-09-18 21:42:48 +00:00
|
|
|
if (term->tblink_pending && now == term->next_tblink) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->tblinker = !term->tblinker;
|
|
|
|
term->tblink_pending = false;
|
|
|
|
term_schedule_tblink(term);
|
2021-02-07 19:59:21 +00:00
|
|
|
term->window_update_pending = true;
|
2004-11-27 13:20:21 +00:00
|
|
|
}
|
|
|
|
|
2012-09-18 21:42:48 +00:00
|
|
|
if (term->cblink_pending && now == term->next_cblink) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->cblinker = !term->cblinker;
|
|
|
|
term->cblink_pending = false;
|
|
|
|
term_schedule_cblink(term);
|
2021-02-07 19:59:21 +00:00
|
|
|
term->window_update_pending = true;
|
2004-11-27 13:20:21 +00:00
|
|
|
}
|
|
|
|
|
2012-09-18 21:42:48 +00:00
|
|
|
if (term->in_vbell && now == term->vbell_end) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->in_vbell = false;
|
2021-02-07 19:59:21 +00:00
|
|
|
term->window_update_pending = true;
|
2004-11-27 13:20:21 +00:00
|
|
|
}
|
|
|
|
|
2021-02-07 19:59:21 +00:00
|
|
|
if (term->window_update_cooldown &&
|
|
|
|
now == term->window_update_cooldown_end) {
|
|
|
|
term->window_update_cooldown = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (term->window_update_pending)
|
|
|
|
term_update_callback(term);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_update_callback(void *ctx)
|
|
|
|
{
|
|
|
|
Terminal *term = (Terminal *)ctx;
|
|
|
|
if (!term->window_update_pending)
|
|
|
|
return;
|
|
|
|
if (!term->window_update_cooldown) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term_update(term);
|
2021-02-07 19:59:21 +00:00
|
|
|
term->window_update_cooldown = true;
|
|
|
|
term->window_update_cooldown_end = schedule_timer(
|
|
|
|
UPDATE_DELAY, term_timer, term);
|
|
|
|
}
|
2004-11-27 13:20:21 +00:00
|
|
|
}
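/* Timeline of the 'in arrears' scheme implemented by the functions above
 * (illustrative): at t=0 some output marks window_update_pending;
 * term_update_callback() runs from the callback queue, redraws immediately,
 * and starts a cooldown ending at t=UPDATE_DELAY. Any further update
 * requests before then only set window_update_pending again. When the
 * cooldown timer fires, term_timer() clears window_update_cooldown and, on
 * seeing the pending flag, calls term_update_callback() once more, so a
 * deferred redraw happens at most UPDATE_DELAY after the previous one. */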
|
|
|
|
|
2004-11-28 15:13:34 +00:00
|
|
|
static void term_schedule_update(Terminal *term)
|
|
|
|
{
|
|
|
|
if (!term->window_update_pending) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->window_update_pending = true;
|
2021-02-07 19:59:21 +00:00
|
|
|
queue_toplevel_callback(term_update_callback, term);
|
2004-11-28 15:13:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-11-27 13:20:21 +00:00
|
|
|
/*
|
Rationalise the code that resets terminal scrollback.
Rationalise the code that resets terminal scrollback.

Recently I encountered a CLI tool that took tens of seconds to run,
and produced no _visible_ output, but wrote ESC[0m to the terminal a
few times during its operation. (Probably by mistake. In other modes
it does print colourful messages, so I expect a 'reset colour' call
was accidentally outside the 'if' statement containing the rest of the
diagnostic it followed. Or something along those lines.)

I noticed this because every ESC[0m reset my pterm scrollback to the
bottom, which wasn't very helpful, and was unintentional on pterm's
part (as _well_ as on the part of the tool). But I can fix pterm!

At first glance the code _looked_ sensible: terminal.c contains calls
to seen_disp_event(term) whenever terminal output does something that
requires a redraw of the terminal window. Those are also the updates
that should count as 'reset scrollback on display activity'. And
ESC[0m, along with the rest of the SGR handler, correctly contained no
such call. So how did a display update happen at all?

The code was confusingly tangled up with the code that responds to
terminal activity by resetting the phase of the blinking cursor (if
any). term_reset_cblink() was calling seen_disp_event() (when surely
it should be the other way round!), and also, term_reset_cblink() was
called whenever _any_ terminal output data arrived. That combination
meant that any byte output to the terminal at all turned out to count
as display activity, whether or not it changed the screen contents.

Additionally, the other scrollback-reset flag, 'reset scrollback on
keypress', was handled by calling seen_disp_event() from the keyboard
handler. But display events and keyboard events are supposed to be
_independent_ potential causes of scrollback resets - it doesn't make
any sense to handle one by treating it as the other!

So I've reorganised the code completely:

 - the seen_disp_event *flag* is now gone. Instead, the
   seen_disp_event function tests the scroll_on_disp flag, and if set,
   resets the scroll position immediately and sets the general
   'scrollbar needs updating' flag.

 - keyboard input is handled by doing exactly the same thing except
   testing the scroll_on_key flag, so the two systems are properly
   independent. That code calls term_schedule_update so that the
   terminal will be redrawn as a result of the scroll, but doesn't
   also call seen_disp_event() for the rest of the full treatment.
   (A sketch of this keyboard-side path appears after this message.)

 - the term_update code that does the scrollbar update is much
   simpler, since now it only needs to test that one flag.

 - I also had to set that flag explicitly in scroll() so that the
   scrollbar would still be updated as a result of the scrollback size
   changing. I think that must have been happening entirely by
   accident before.

 - term_reset_cblink is subsumed into seen_disp_event, so that only
   _substantive_ display updates cause the cursor blink phase to reset
   to the start of the solid period.

Result: if programs output no-op sequences like ESC[0m, or if you
press keys that don't echo, then the cursor will carry on blinking
normally, and (if you don't also have scroll_on_key set) the
scrollback won't be reset. And the code is slightly shorter than it
was before, and hopefully more sensible too.

(However, other classes of no-op activity _will_ still cause a cursor
blink phase change and a scrollback reset, such as sending a
cursor-positioning sequence that puts the cursor in the same place it
was already - even something as simple as ^M when already at the start
of the line. It might be nice to fix that, but it's much more
difficult: you'd have to either put a complicated and error-prone test
at every seen_disp_event call site, or else expensively diff the
entire visible terminal state against how it was before. And to avoid
a nondeterministic dependency on the terminal update cooldown, that
diff would have to be done at the granularity of individual control
sequences rather than a bounded number of times a second. I'd rather
not!)
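To make the keyboard-side half of that split concrete, here is a minimal
sketch of what a key-input path can do when 'reset scrollback on keypress'
is enabled. It is illustrative only: the helper name is invented, and it
assumes the scroll_on_key flag lives on the Terminal just as scroll_on_disp
does; the other fields and calls all appear in this file.

static void scroll_on_key_sketch(Terminal *term)  /* hypothetical helper */
{
    if (term->scroll_on_key) {
        term->disptop = 0;            /* jump back to the live screen */
        term->win_scrollbar_update_pending = true;
        term_schedule_update(term);   /* redraw, but no seen_disp_event(),
                                       * so no cursor-blink phase reset */
    }
}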

/*
 * Call this whenever the terminal window state changes, to queue an
 * update. This also resets the phase of cursor blinking, so that the
 * cursor remains visible as it moves with the output, and sets a flag
 * to indicate that if we have the 'reset scrollback on display
 * activity' setting enabled, then we should activate it.
 */
static void seen_disp_event(Terminal *term)
{
    if (term->scroll_on_disp) {
        term->disptop = 0;
        term->win_scrollbar_update_pending = true;
    }
    term->cblinker = true;
    term->cblink_pending = false;
    term_schedule_cblink(term);
    term_schedule_update(term);
}

/*
 * Call when the terminal's blinking-text settings change, or when
 * a text blink has just occurred.
 */
static void term_schedule_tblink(Terminal *term)
{
    if (term->blink_is_real) {
        if (!term->tblink_pending)
            term->next_tblink = schedule_timer(TBLINK_DELAY, term_timer, term);
        term->tblink_pending = true;
    } else {
        term->tblinker = true;         /* reset when not in use */
        term->tblink_pending = false;
    }
}

/*
 * Likewise with cursor blinks.
 */
static void term_schedule_cblink(Terminal *term)
{
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
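For this file, the practical upshot of that change is the family of conf_*
accessors used further down in power_on(): a Conf is copied as a whole
object, and individual settings are read back by integer key, with a second
integer for keyed settings such as CONF_wordness. A brief sketch; 'oldconf'
is just a placeholder name:

    Conf *newconf = conf_copy(oldconf);  /* whole-object copy, not a struct assignment */
    bool wrap = conf_get_bool(newconf, CONF_wrap_mode);     /* simple setting */
    int w = conf_get_int_int(newconf, CONF_wordness, 'A');  /* keyed setting  */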
    if (term->blink_cur && term->has_focus) {
        if (!term->cblink_pending)
            term->next_cblink = schedule_timer(CBLINK_DELAY, term_timer, term);
        term->cblink_pending = true;
    } else {
        term->cblinker = true;         /* reset when not in use */
        term->cblink_pending = false;
    }
}
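
Both of these schedule/flag pairs rely on the timer callback they hand to
schedule_timer, which the calls above name term_timer. The function below is
only a sketch of that contract, assuming PuTTY's usual timer callback shape
of (void *ctx, unsigned long now): when the scheduled tick arrives, flip the
blink phase, clear the pending flag, re-arm the timer, and ask for a redraw.

static void term_timer_sketch(void *ctx, unsigned long now)
{
    Terminal *term = (Terminal *)ctx;

    if (term->tblink_pending && now == term->next_tblink) {
        term->tblinker = !term->tblinker;   /* toggle text-blink phase */
        term->tblink_pending = false;
        term_schedule_tblink(term);         /* arm the next blink tick */
        term_schedule_update(term);         /* redraw at the new phase */
    }

    if (term->cblink_pending && now == term->next_cblink) {
        term->cblinker = !term->cblinker;   /* toggle cursor-blink phase */
        term->cblink_pending = false;
        term_schedule_cblink(term);
        term_schedule_update(term);
    }
}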

/*
 * Call to begin a visual bell.
 */
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that those variables are bool and TRUE is the
'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
static void term_schedule_vbell(Terminal *term, bool already_started,
                                long startpoint)
{
    long ticks_already_gone;

    if (already_started)
        ticks_already_gone = GETTICKCOUNT() - startpoint;
    else
        ticks_already_gone = 0;

    if (ticks_already_gone < VBELL_DELAY) {
        term->in_vbell = true;
        term->vbell_end = schedule_timer(VBELL_DELAY - ticks_already_gone,
                                         term_timer, term);
    } else {
        term->in_vbell = false;
    }
}
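For orientation, call sites might look like the fragment below
('bell_start_tick' is an invented name). Passing already_started=false
starts a fresh bell lasting the full VBELL_DELAY; passing true with the tick
at which the bell began shortens the remaining timer so the bell still ends
on schedule.

    term_schedule_vbell(term, false, 0);                /* brand-new visual bell */
    term_schedule_vbell(term, true, bell_start_tick);   /* bell already under way
                                                         * since bell_start_tick */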

/*
 * Set up power-on settings for the terminal.
 * If 'clear' is false, don't actually clear the primary screen, and
 * position the cursor below the last non-blank line (scrolling if
 * necessary).
 */
static void power_on(Terminal *term, bool clear)
{
    term->alt_x = term->alt_y = 0;
    term->savecurs.x = term->savecurs.y = 0;
    term->alt_savecurs.x = term->alt_savecurs.y = 0;
    term->alt_t = term->marg_t = 0;
    if (term->rows != -1)
        term->alt_b = term->marg_b = term->rows - 1;
    else
        term->alt_b = term->marg_b = 0;
    if (term->cols != -1) {
        int i;
        for (i = 0; i < term->cols; i++)
            term->tabs[i] = (i % 8 == 0 ? true : false);
    }
    term->alt_om = term->dec_om = conf_get_bool(term->conf, CONF_dec_om);
    term->alt_ins = false;
    term->insert = false;
    term->alt_wnext = false;
    term->wrapnext = false;
    term->save_wnext = false;
    term->alt_save_wnext = false;
    term->alt_wrap = term->wrap = conf_get_bool(term->conf, CONF_wrap_mode);
    term->alt_cset = term->cset = term->save_cset = term->alt_save_cset = 0;
    term->alt_utf = false;
    term->utf = false;
    term->save_utf = false;
    term->alt_save_utf = false;
    term->utf8.state = 0;
    term->alt_sco_acs = term->sco_acs =
        term->save_sco_acs = term->alt_save_sco_acs = 0;
    term->cset_attr[0] = term->cset_attr[1] =
        term->save_csattr = term->alt_save_csattr = CSET_ASCII;
    term->rvideo = false;
    term->in_vbell = false;
    term->cursor_on = true;
    term->big_cursor = false;
    term->default_attr = term->save_attr =
        term->alt_save_attr = term->curr_attr = ATTR_DEFAULT;
    term->curr_truecolour.fg = term->curr_truecolour.bg = optionalrgb_none;
    term->save_truecolour = term->alt_save_truecolour = term->curr_truecolour;
    term->app_cursor_keys = conf_get_bool(term->conf, CONF_app_cursor);
    term->app_keypad_keys = conf_get_bool(term->conf, CONF_app_keypad);
    term->use_bce = conf_get_bool(term->conf, CONF_bce);
    term->blink_is_real = conf_get_bool(term->conf, CONF_blinktext);
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
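A rough sketch of the per-cell structure that commit describes; the names
here are illustrative rather than a quote of terminal.h. Each cell carries a
full 32-bit character plus 32 attribute bits, and whole lines of such cells
are compressed when they move into term->scrollback.

typedef struct termchar_sketch {
    unsigned long chr;    /* 32-bit Unicode code point (plus charset flags) */
    unsigned long attr;   /* 32 attribute bits: colours, bold, underline... */
} termchar_sketch;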
    term->erase_char = term->basic_erase_char;
    term->alt_which = 0;
    term_print_finish(term);
    term->xterm_mouse = 0;
    term->xterm_extended_mouse = false;
    term->urxvt_extended_mouse = false;
    term->raw_mouse_reported_x = 0;
    term->raw_mouse_reported_y = 0;
    win_set_raw_mouse_mode(term->win, false);
Fold ancillary window changes into main redraw.

This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.

Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.

Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).

This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
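The pattern that commit describes is visible in term_update() below:
escape-sequence handlers no longer call window functions directly, they
record a pending request and schedule an update. A hypothetical handler
fragment (not part of power_on, where this annotation happens to sit),
using the window-move request as the example; 'x' and 'y' stand for values
parsed from the escape sequence:

    /* On seeing a 'move window' escape sequence, defer the real work: */
    term->win_move_pending = true;
    term->win_move_pending_x = x;
    term->win_move_pending_y = y;
    term_schedule_update(term);       /* term_update() enacts it later */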
    term->win_pointer_shape_pending = true;
    term->win_pointer_shape_raw = false;
    term->bracketed_paste = false;
    term->srm_echo = false;
    {
        int i;
        for (i = 0; i < 256; i++)
            term->wordness[i] = conf_get_int_int(term->conf, CONF_wordness, i);
    }
    if (term->screen) {
        swap_screen(term, 1, false, false);
        erase_lots(term, false, true, true);
        swap_screen(term, 0, false, false);
        if (clear)
            erase_lots(term, false, true, true);
        term->curs.y = find_last_nonempty_line(term, term->screen) + 1;
        if (term->curs.y == term->rows) {
            term->curs.y--;
            scroll(term, 0, term->rows - 1, 1, true);
        }
    } else {
        term->curs.y = 0;
    }
    term->curs.x = 0;
    term_schedule_tblink(term);
    term_schedule_cblink(term);
    term_schedule_update(term);
}

/*
 * Force a screen update.
 */
void term_update(Terminal *term)
{
    term->window_update_pending = false;
    if (term->win_move_pending) {
        win_move(term->win, term->win_move_pending_x,
                 term->win_move_pending_y);
        term->win_move_pending = false;
    }
Suspend terminal output while a window resize is pending.
This is the payoff from the last few commits of refactoring. It fixes
the following race-condition bug in terminal application redraw:
* server sends a window-resizing escape sequence
* terminal requests a window resize from the front end
* server sends further escape sequences to perform a redraw of some
full-screen application, which assume that the window resize has
occurred and the window is already its new size
* terminal processes all those sequences in the context of the old
window size, while the front end is still thinking
* window resize completes in the front end and term_size() tells the
terminal it now has its new size, but it's too late, the screen
redraw has made a total mess.
(Perhaps the server might even send its window resize + followup
redraw all in one SSH packet, so that it's all queued in term->inbuf
in one go.)
As far as I can see, handling of this case has been broken more or
less forever in the GTK frontend (where window resizes are inherently
asynchronous due to the way X11 works, and we've never done anything
to compensate for that). On Windows, where window size is changed via
SetWindowPos which is synchronous, it used to work, but broke in
commit d74308e90e3813a (i.e. between 0.74 and 0.75), which made all
the ancillary window updates run on the same delayed-action timer as
ordinary text display.
So, it's time to fix it, and I think now I should be able to fix it in
GTK as well as on Windows.
Now, as soon as we've set the term->win_resize_pending flag (in
response to a resize escape sequence), the next return to the top of
the main loop in term_out will terminate output processing early,
leaving any further terminal data still in the term->inbuf bufchain.
Once we get a term_size() callback from the front end telling us our
new size, we reset term->win_resize_pending, which unblocks output
processing again, and we also queue a toplevel callback to have
another try at term_out() so that it will be unblocked promptly.
To implement this I've changed term->win_resize_pending from a bool
into a three-state enumeration, so that we can tell the difference
between 'pending' in the sense of not yet having sent our resize
request to the frontend, and in the sense of waiting for the frontend
to reply. That way, a window resize from the GUI user at least won't
be mistaken for the response to our resize request if it arrives in
the former state. (It can still be mistaken for one in the latter
case, but if the user is resizing the window at the same time as the
server-side application is doing critically size-dependent redrawing,
I don't think there can be any reasonable expectation of nothing going
wrong.)
As mentioned in the previous commit, some failure modes under X11 (in
particular the window manager process getting wedged in some way) can
result in no response being received to a ConfigureWindow request. In
that situation, it seems to me that we really _shouldn't_ sit there
waiting forever - perhaps it's technically the WM's fault and not
ours, but what kind of X window are you most likely to want to use to
do emergency WM repair? A terminal window, of course, so it would be
exceptionally unhelpful to make any terminal window stop working
completely in this situation! Hence, there's a fallback timeout in
terminal.c, so that if we don't receive a response in _too_ long,
we'll assume one is not forthcoming, and resume processing terminal
data at the old window size. The fallback timeout is set to 5 seconds,
following existing practice in libXt (DEFAULT_WM_TIMEOUT).
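A minimal sketch of the three-state flag this describes (WIN_RESIZE_NEED_SEND and WIN_RESIZE_AWAIT_REPLY appear in the code below; the third enumerator's name and the exact shape of the term_out guard are illustrative assumptions):

/* Illustrative sketch of the three-state resize flag: */
typedef enum {
    WIN_RESIZE_NO,           /* no resize outstanding (name assumed) */
    WIN_RESIZE_NEED_SEND,    /* escape sequence seen, request not yet sent */
    WIN_RESIZE_AWAIT_REPLY   /* request sent, waiting for term_size() */
} WinResizePending;

/*
 * The early exit at the top of term_out()'s main loop then amounts to:
 *
 *     if (term->win_resize_pending != WIN_RESIZE_NO)
 *         break;    // leave remaining data queued in term->inbuf
 */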
2021-12-19 10:37:02 +00:00
    if (term->win_resize_pending == WIN_RESIZE_NEED_SEND) {
        term->win_resize_pending = WIN_RESIZE_AWAIT_REPLY;
2021-02-07 19:59:21 +00:00
        win_request_resize(term->win, term->win_resize_pending_w,
                           term->win_resize_pending_h);
    }

    if (term->win_zorder_pending) {
        win_set_zorder(term->win, term->win_zorder_top);
        term->win_zorder_pending = false;
    }

    if (term->win_minimise_pending) {
        win_set_minimised(term->win, term->win_minimise_enable);
        term->win_minimise_pending = false;
    }

    if (term->win_maximise_pending) {
        win_set_maximised(term->win, term->win_maximise_enable);
        term->win_maximise_pending = false;
    }

    if (term->win_title_pending) {

win_set_[icon_]title: send a codepage along with the string.
While fixing the previous commit I noticed that window titles don't
actually _work_ properly if you change the terminal character set,
because the text accumulated in the OSC string buffer is sent to the
TermWin as raw bytes, with no indication of what character set it
should interpret them as. You might get lucky if you happened to
choose the right charset (in particular, UTF-8 is a common default),
but if you change the charset half way through a run, then there's
certainly no way the frontend will know to interpret two window titles
sent before and after the change in two different charsets.
So, now win_set_title() and win_set_icon_title() both include a
codepage parameter along with the byte string, and it's up to them to
translate the provided window title from that encoding to whatever the
local window system expects to receive.
On Windows, that's wide-string Unicode, so we can just use the
existing dup_mb_to_wc utility function. But in GTK, it's UTF-8, so I
had to write an extra utility function to encode a wide string as
UTF-8.
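For the Windows side, the conversion amounts to something like this (a hedged sketch using the Win32 API directly; title_to_wide is a hypothetical helper, not a function from the PuTTY source):

#include <windows.h>
#include <stdlib.h>

/* Hypothetical helper: convert a window-title byte string in the given
 * codepage to a freshly allocated wide string, ready for the wide
 * Windows title-setting APIs. */
static wchar_t *title_to_wide(int codepage, const char *title)
{
    int n = MultiByteToWideChar(codepage, 0, title, -1, NULL, 0);
    wchar_t *wtitle = n ? malloc(n * sizeof(wchar_t)) : NULL;
    if (wtitle)
        MultiByteToWideChar(codepage, 0, title, -1, wtitle, n);
    return wtitle;   /* caller frees */
}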
2021-10-16 12:20:44 +00:00
        win_set_title(term->win, term->window_title,
                      term->wintitle_codepage);
2021-02-07 19:59:21 +00:00
        term->win_title_pending = false;
    }

    if (term->win_icon_title_pending) {
2021-10-16 12:20:44 +00:00
        win_set_icon_title(term->win, term->icon_title,
                           term->icontitle_codepage);
2021-02-07 19:59:21 +00:00
        term->win_icon_title_pending = false;
    }

    if (term->win_pointer_shape_pending) {
        win_set_raw_mouse_mode_pointer(term->win, term->win_pointer_shape_raw);
        term->win_pointer_shape_pending = false;
    }

    if (term->win_refresh_pending) {
        win_refresh(term->win);
        term->win_refresh_pending = false;
    }

    if (term->win_palette_pending) {
        unsigned start = term->win_palette_pending_min;
        unsigned ncolours = term->win_palette_pending_limit - start;
        win_palette_set(term->win, start, ncolours, term->palette + start);
        term->win_palette_pending = false;
    }

Remove the 'Frontend' type and replace it with a vtable.
After the recent Seat and LogContext revamps, _nearly_ all the
remaining uses of the type 'Frontend' were in terminal.c, which needs
all sorts of interactions with the GUI window the terminal lives in,
from the obvious (actually drawing text on the window, reading and
writing the clipboard) to the obscure (minimising, maximising and
moving the window in response to particular escape sequences).
All of those functions are now provided by an abstraction called
TermWin. The few remaining uses of Frontend after _that_ are internal
to a particular platform directory, so as to spread the implementation
of that particular kind of Frontend between multiple source files; so
I've renamed all of those so that they take a more specifically named
type that refers to the particular implementation rather than the
general abstraction.
So now the name 'Frontend' no longer exists in the code base at all,
and everywhere one used to be used, it's completely clear whether it
was operating in one of Frontend's three abstract roles (and if so,
which), or whether it was specific to a particular implementation.
Another type that's disappeared is 'Context', which used to be a
typedef defined to something different on each platform, describing
whatever short-lived resources were necessary to draw on the terminal
window: the front end would provide a ready-made one when calling
term_paint, and the terminal could request one with get_ctx/free_ctx
if it wanted to do proactive window updates. Now, that drawing context
lives inside the TermWin itself, because there was never any need to
have two of those contexts live at the same time.
(Another minor API change is that the window-title functions - both
reading and writing - have had a missing 'const' added to their char *
parameters / return values.)
I don't expect this change to enable any particularly interesting new
functionality (in particular, I have no plans that need more than one
implementation of TermWin in the same application). But it completes
the tidying-up that began with the Seat and LogContext rework.
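The general shape of such a vtable-based abstraction in C is roughly as follows (an illustrative sketch of the pattern, not the actual TermWin definition from putty.h; the member names are assumptions):

typedef struct TermWin TermWin;

typedef struct TermWinVtable {
    /* one function pointer per operation the terminal may ask of its
     * window (member names here are illustrative) */
    void (*move)(TermWin *win, int x, int y);
    void (*set_title)(TermWin *win, const char *title, int codepage);
    /* ...drawing, clipboard, scrollbar, and so on... */
} TermWinVtable;

struct TermWin {
    const TermWinVtable *vt;   /* filled in by each front end */
};

/* Callers such as terminal.c then go through thin wrappers, so that
 * win_move(term->win, x, y) dispatches to the move() entry of whichever
 * front end implementation is in use. */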
2018-10-25 17:44:04 +00:00
    if (win_setup_draw_ctx(term->win)) {

Rationalise the code that resets terminal scrollback.
Recently I encountered a CLI tool that took tens of seconds to run,
and produced no _visible_ output, but wrote ESC[0m to the terminal a
few times during its operation. (Probably by mistake. In other modes
it does print colourful messages, so I expect a 'reset colour' call
was accidentally outside the 'if' statement containing the rest of the
diagnostic it followed. Or something along those lines.)
I noticed this because every ESC[0m reset my pterm scrollback to the
bottom, which wasn't very helpful, and was unintentional on pterm's
part (as _well_ as on the part of the tool). But I can fix pterm!
At first glance the code _looked_ sensible: terminal.c contains calls
to seen_disp_event(term) whenever terminal output does something that
requires a redraw of the terminal window. Those are also the updates
that should count as 'reset scrollback on display activity'. And
ESC[0m, along with the rest of the SGR handler, correctly contained no
such call. So how did a display update happen at all?
The code was confusingly tangled up with the code that responds to
terminal activity by resetting the phase of the blinking cursor (if
any). term_reset_cblink() was calling seen_disp_event() (when surely
it should be the other way round!), and also, term_reset_cblink() was
called whenever _any_ terminal output data arrived. That combination
meant that any byte output to the terminal at all turned out to count
as display activity, whether or not it changed the screen contents.
Additionally, the other scrollback-reset flag, 'reset scrollback on
keypress', was handled by calling seen_disp_event() from the keyboard
handler. But display events and keyboard events are supposed to be
_independent_ potential causes of scrollback resets - it doesn't make
any sense to handle one by treating it as the other!
So I've reorganised the code completely:
- the seen_disp_event *flag* is now gone. Instead, the
seen_disp_event function tests the scroll_on_disp flag, and if set,
resets the scroll position immediately and sets the general
'scrollbar needs updating' flag.
- keyboard input is handled by doing exactly the same thing except
testing the scroll_on_key flag, so the two systems are properly
independent. That code calls term_schedule_update so that the
terminal will be redrawn as a result of the scroll, but doesn't
also call seen_disp_event() for the rest of the full treatment.
- the term_update code that does the scrollbar update is much
simpler, since now it only needs to test that one flag.
- I also had to set that flag explicitly in scroll() so that the
scrollbar would still be updated as a result of the scrollback size
changing. I think that must have been happening entirely by
accident before.
- term_reset_cblink is subsumed into seen_disp_event, so that only
_substantive_ display updates cause the cursor blink phase to reset
to the start of the solid period.
Result: if programs output no-op sequences like ESC[0m, or if you
press keys that don't echo, then the cursor will carry on blinking
normally, and (if you don't also have scroll_on_key set) the
scrollback won't be reset. And the code is slightly shorter than it
was before, and hopefully more sensible too.
(However, other classes of no-op activity _will_ still cause a cursor
blink phase change and a scrollback reset, such as sending a
cursor-positioning sequence that puts the cursor in the same place it
was already - even something as simple as ^M when already at the start
of the line. It might be nice to fix that, but it's much more
difficult: you'd have to either put a complicated and error-prone test
at every seen_disp_event call site, or else expensively diff the
entire visible terminal state against how it was before. And to avoid
a nondeterministic dependency on the terminal update cooldown, that
diff would have to be done at the granularity of individual control
sequences rather than a bounded number of times a second. I'd rather
not!)
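In outline, the reorganised seen_disp_event() described above comes out something like this (an assumed sketch rather than a verbatim copy; the real function also resets the cursor-blink phase at this point):

static void seen_disp_event(Terminal *term)
{
    if (term->scroll_on_disp) {
        term->disptop = 0;                         /* snap back to the live screen */
        term->win_scrollbar_update_pending = true; /* picked up in term_update() */
    }
    /* (cursor-blink phase reset happens here in the real code) */
    term_schedule_update(term);
}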
2023-09-03 08:29:56 +00:00
        if (term->win_scrollbar_update_pending) {
            term->win_scrollbar_update_pending = false;
2019-09-08 19:29:00 +00:00
            update_sbar(term);
2023-09-03 08:29:56 +00:00
        }
2019-09-08 19:29:00 +00:00
        do_paint(term);
        win_set_cursor_pos(
2018-10-25 17:44:04 +00:00
            term->win, term->curs.x, term->curs.y - term->disptop);
2019-09-08 19:29:00 +00:00
        win_free_draw_ctx(term->win);
1999-01-08 13:02:13 +00:00
    }
}

2002-10-20 13:23:30 +00:00
/*
 * Called from front end when a keypress occurs, to trigger
 * anything magical that needs to happen in that situation.
 */
2002-10-22 16:11:33 +00:00
void term_seen_key_event(Terminal *term)
2002-10-20 13:23:30 +00:00
{
    /*
     * On any keypress, clear the bell overload mechanism
     * completely, on the grounds that large numbers of
     * beeps coming from deliberate key action are likely
     * to be intended (e.g. beeps from filename completion
     * blocking repeatedly).
     */
2018-10-29 19:50:29 +00:00
    term->beep_overloaded = false;
2002-10-22 16:11:33 +00:00
    while (term->beephead) {
2019-09-08 19:29:00 +00:00
        struct beeptime *tmp = term->beephead;
        term->beephead = tmp->next;
        sfree(tmp);
2002-10-20 13:23:30 +00:00
    }
2002-10-22 16:11:33 +00:00
    term->beeptail = NULL;
    term->nbeeps = 0;
2002-10-20 13:23:30 +00:00
    /*
     * Reset the scrollback on keypress, if we're doing that.
     */
2023-09-03 08:29:56 +00:00
    if (term->scroll_on_key && term->disptop != 0) {
        term->disptop = 0;
        term->win_scrollbar_update_pending = true;
        term_schedule_update(term);
2002-10-20 13:23:30 +00:00
    }
}

1999-01-08 13:02:13 +00:00
/*
 * Same as power_on(), but an external function.
 */

Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
I've tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
synonym for 1; but now that those variables are bool and the value
being assigned is the 'true' defined by stdbool.h, that idiom provokes
a warning from gcc: 'suggest parentheses around assignment used as
truth value'!
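For instance, here is a minimal standalone illustration (not taken from the PuTTY source) of the separated form that keeps gcc quiet:

#include <stdbool.h>

void chained_assignment_example(void)
{
    bool a, b, c;
    /* The old idiom, 'a = b = c = true;', is what provoked gcc's
     * -Wparentheses warning once these became bool; writing the
     * assignments out separately avoids it: */
    c = true;
    b = c;
    a = b;
    (void)a;
}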
2018-11-02 19:23:19 +00:00
void term_pwron(Terminal *term, bool clear)
2001-05-06 14:35:20 +00:00
{
2006-02-19 14:59:48 +00:00
    power_on(term, clear);
2019-09-08 19:29:00 +00:00
    if (term->ldisc)                   /* cause ldisc to notice changes */
        ldisc_echoedit_update(term->ldisc);
2002-10-22 16:11:33 +00:00
    term->disptop = 0;
    deselect(term);
    term_update(term);
1999-01-08 13:02:13 +00:00
}

Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
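In outline, the per-cell structure this introduces looks something like the following (an illustrative sketch; the field set and names are partly assumed, and the real termchar in terminal.h has grown further fields since, such as true-colour data):

typedef struct termchar {
    unsigned long chr;    /* full 32-bit Unicode code point */
    unsigned long attr;   /* 32 bits of colour and attribute flags */
} termchar;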
2004-10-13 11:50:16 +00:00
static void set_erase_char(Terminal *term)
{
    term->erase_char = term->basic_erase_char;
2017-10-08 12:43:27 +00:00
    if (term->use_bce) {
2019-09-08 19:29:00 +00:00
        term->erase_char.attr = (term->curr_attr &
                                 (ATTR_FGMASK | ATTR_BGMASK));
2017-10-08 12:43:27 +00:00
        term->erase_char.truecolour.bg = term->curr_truecolour.bg;
    }
2004-10-13 11:50:16 +00:00
}

Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
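As a small illustration of the resulting call pattern (a hedged sketch consistent with the conf_get_bool / conf_get_int calls in the function below; the accessor and copy/free names are assumed to follow the usual conf.c conventions):

static void conf_usage_sketch(Conf *conf)
{
    /* Simple settings are read and written through typed accessors,
     * keyed by CONF_* identifiers: */
    int w = conf_get_int(conf, CONF_width);
    conf_set_int(conf, CONF_width, w);

    /* Copying a whole configuration (e.g. for Duplicate Session) is a
     * function call rather than a structure assignment: */
    Conf *copy = conf_copy(conf);
    conf_free(copy);
}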
2011-07-14 18:52:21 +00:00
/*
 * We copy a bunch of stuff out of the Conf structure into local
 * fields in the Terminal structure, to avoid the repeated tree234
 * lookups which would be involved in fetching them from the former
 * every time.
 */
2022-09-03 11:02:48 +00:00
static void term_copy_stuff_from_conf(Terminal *term)
2011-07-14 18:52:21 +00:00
{
2018-10-29 19:57:31 +00:00
    term->ansi_colour = conf_get_bool(term->conf, CONF_ansi_colour);
2019-03-26 21:13:19 +00:00
    term->no_arabicshaping = conf_get_bool(term->conf, CONF_no_arabicshaping);
2011-07-14 18:52:21 +00:00
    term->beep = conf_get_int(term->conf, CONF_beep);
2018-10-29 19:57:31 +00:00
    term->bellovl = conf_get_bool(term->conf, CONF_bellovl);
2011-07-14 18:52:21 +00:00
    term->bellovl_n = conf_get_int(term->conf, CONF_bellovl_n);
    term->bellovl_s = conf_get_int(term->conf, CONF_bellovl_s);
    term->bellovl_t = conf_get_int(term->conf, CONF_bellovl_t);
2019-03-26 21:13:19 +00:00
    term->no_bidi = conf_get_bool(term->conf, CONF_no_bidi);
2024-08-10 11:11:28 +00:00
    term->no_bracketed_paste = conf_get_bool(term->conf, CONF_no_bracketed_paste);
2018-10-29 19:57:31 +00:00
    term->bksp_is_delete = conf_get_bool(term->conf, CONF_bksp_is_delete);
    term->blink_cur = conf_get_bool(term->conf, CONF_blink_cur);
    term->blinktext = conf_get_bool(term->conf, CONF_blinktext);
    term->cjk_ambig_wide = conf_get_bool(term->conf, CONF_cjk_ambig_wide);
2011-07-14 18:52:21 +00:00
    term->conf_height = conf_get_int(term->conf, CONF_height);
    term->conf_width = conf_get_int(term->conf, CONF_width);
2018-10-29 19:57:31 +00:00
    term->crhaslf = conf_get_bool(term->conf, CONF_crhaslf);
    term->erase_to_scrollback = conf_get_bool(term->conf, CONF_erase_to_scrollback);
2011-07-14 18:52:21 +00:00
    term->funky_type = conf_get_int(term->conf, CONF_funky_type);

New config option for shifted arrow key handling.
This commit introduces a new config option for how to handle shifted
arrow keys.
In the default mode (SHARROW_APPLICATION), we do what we've always
done: Ctrl flips the arrow keys between sending their most usual
escape sequences (ESC [ A ... ESC [ D) and sending the 'application
cursor keys' sequences (ESC O A ... ESC O D). Whichever of those modes
is currently configured, Ctrl+arrow sends the other one.
In the new mode (SHARROW_BITMAP), application cursor key mode is
unaffected by any shift keys, but the default sequences acquire two
numeric arguments. The first argument is 1 (reflecting the fact that a
shifted arrow key still notionally moves just 1 character cell); the
second is the bitmap (1 for Shift) + (2 for Alt) + (4 for Ctrl),
offset by 1. (Except that if _none_ of those modifiers is pressed,
both numeric arguments are simply omitted.)
The new bitmap mode is what current xterm generates, and also what
Windows ConPTY seems to expect. If you start an ordinary Command
Prompt and launch into WSL, those are the sequences it will generate
for shifted arrow keys; conversely, if you run a Command Prompt within
a ConPTY, then these sequences for Ctrl+arrow will have the effect you
expect in cmd.exe command-line editing (going backward or forward a
word). For that reason, I enable this mode unconditionally when
launching Windows pterm.
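A sketch of the sequences SHARROW_BITMAP mode emits for an arrow key (illustrative only, not PuTTY's actual key-handling code; format_arrow_key is a hypothetical helper):

#include <stdio.h>
#include <stdbool.h>

/* 'letter' is 'A'..'D' for Up/Down/Right/Left; returns bytes written. */
static int format_arrow_key(char *buf, char letter, bool shift, bool alt,
                            bool ctrl, bool app_cursor_keys)
{
    int bitmap = (shift ? 1 : 0) + (alt ? 2 : 0) + (ctrl ? 4 : 0);
    if (bitmap == 0)   /* unmodified: both numeric arguments omitted */
        return sprintf(buf, "\033%c%c", app_cursor_keys ? 'O' : '[', letter);
    /* modified in SHARROW_BITMAP mode: ESC [ 1 ; (bitmap+1) letter */
    return sprintf(buf, "\033[1;%d%c", bitmap + 1, letter);
}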
2021-10-18 19:00:25 +00:00
    term->sharrow_type = conf_get_int(term->conf, CONF_sharrow_type);
2018-10-29 19:57:31 +00:00
    term->lfhascr = conf_get_bool(term->conf, CONF_lfhascr);
    term->logflush = conf_get_bool(term->conf, CONF_logflush);
2011-07-14 18:52:21 +00:00
    term->logtype = conf_get_int(term->conf, CONF_logtype);
2018-10-29 19:57:31 +00:00
    term->mouse_override = conf_get_bool(term->conf, CONF_mouse_override);
    term->nethack_keypad = conf_get_bool(term->conf, CONF_nethack_keypad);
    term->no_alt_screen = conf_get_bool(term->conf, CONF_no_alt_screen);
    term->no_applic_c = conf_get_bool(term->conf, CONF_no_applic_c);
    term->no_applic_k = conf_get_bool(term->conf, CONF_no_applic_k);
    term->no_dbackspace = conf_get_bool(term->conf, CONF_no_dbackspace);
    term->no_mouse_rep = conf_get_bool(term->conf, CONF_no_mouse_rep);
    term->no_remote_charset = conf_get_bool(term->conf, CONF_no_remote_charset);
    term->no_remote_resize = conf_get_bool(term->conf, CONF_no_remote_resize);
    term->no_remote_wintitle = conf_get_bool(term->conf, CONF_no_remote_wintitle);
    term->no_remote_clearscroll = conf_get_bool(term->conf, CONF_no_remote_clearscroll);
    term->rawcnp = conf_get_bool(term->conf, CONF_rawcnp);
    term->utf8linedraw = conf_get_bool(term->conf, CONF_utf8linedraw);
    term->rect_select = conf_get_bool(term->conf, CONF_rect_select);
2011-07-14 18:52:21 +00:00
|
|
|
term->remote_qtitle_action = conf_get_int(term->conf, CONF_remote_qtitle_action);
|
2018-10-29 19:57:31 +00:00
|
|
|
term->rxvt_homeend = conf_get_bool(term->conf, CONF_rxvt_homeend);
|
|
|
|
term->scroll_on_disp = conf_get_bool(term->conf, CONF_scroll_on_disp);
|
|
|
|
term->scroll_on_key = conf_get_bool(term->conf, CONF_scroll_on_key);
|
2021-02-07 19:59:21 +00:00
|
|
|
term->xterm_mouse_forbidden = conf_get_bool(term->conf, CONF_no_mouse_rep);
|
2018-10-29 19:57:31 +00:00
|
|
|
term->xterm_256_colour = conf_get_bool(term->conf, CONF_xterm_256_colour);
|
|
|
|
term->true_colour = conf_get_bool(term->conf, CONF_true_colour);
|
2011-07-14 18:52:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse the control-character escapes in the configured
|
|
|
|
* answerback string.
|
|
|
|
*/
|
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
char *answerback = conf_get_str(term->conf, CONF_answerback);
|
|
|
|
|
2022-09-13 14:00:26 +00:00
|
|
|
strbuf_clear(term->answerback);
|
2019-09-08 19:29:00 +00:00
|
|
|
|
|
|
|
while (*answerback) {
|
|
|
|
char *n;
|
|
|
|
char c = ctrlparse(answerback, &n);
|
|
|
|
if (n) {
|
2022-09-13 14:00:26 +00:00
|
|
|
put_byte(term->answerback, c);
|
2019-09-08 19:29:00 +00:00
|
|
|
answerback = n;
|
|
|
|
} else {
|
2022-09-13 14:00:26 +00:00
|
|
|
put_byte(term->answerback, *answerback++);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
}
|
2011-07-14 18:52:21 +00:00
|
|
|
}
|
|
|
|
}
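(Aside, not part of the original source: with the caret escapes expanded as
above, a configured answerback of "PuTTY^G" is stored as the bytes
'P','u','T','T','Y',0x07. The helper below is a hedged, simplified stand-in
for that loop, using the usual caret-notation rule (^@..^_ map to 0x00-0x1F,
^? to 0x7F); the real ctrlparse() may accept further forms, and
expand_caret_sketch is a hypothetical name relying on <ctype.h>.)

/* Hypothetical, simplified stand-in for the answerback loop above:
 * expand "^X" escapes into raw control bytes, copy the rest verbatim. */
static void expand_caret_sketch(const char *s, strbuf *out)
{
    while (*s) {
        if (s[0] == '^' && s[1]) {
            char c = s[1];
            put_byte(out, c == '?' ? 0x7F :
                     (unsigned char)(toupper((unsigned char)c) ^ 0x40));
            s += 2;
        } else {
            put_byte(out, (unsigned char)*s++);
        }
    }
}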
|
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
void term_pre_reconfig(Terminal *term, Conf *conf)
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the current window title into the stored previous
|
|
|
|
* configuration, so that doing nothing to the window title field
|
|
|
|
* in the config box doesn't reset the title to its startup state.
|
|
|
|
*/
|
|
|
|
conf_set_str(conf, CONF_wintitle, term->window_title);
|
|
|
|
}
|
|
|
|
|
2002-03-06 23:04:20 +00:00
|
|
|
/*
|
|
|
|
* When the user reconfigures us, we need to check the forbidden-
|
2002-03-09 17:59:15 +00:00
|
|
|
* alternate-screen config option, disable raw mouse mode if the
|
|
|
|
* user has disabled mouse reporting, and abandon a print job if
|
|
|
|
* the user has disabled printing.
|
2002-03-06 23:04:20 +00:00
|
|
|
*/
|
2011-07-14 18:52:21 +00:00
|
|
|
void term_reconfig(Terminal *term, Conf *conf)
|
2002-03-06 23:04:20 +00:00
|
|
|
{
|
2003-01-12 14:30:02 +00:00
|
|
|
/*
|
|
|
|
* Before adopting the new config, check all those terminal
|
|
|
|
* settings which control power-on defaults; and if they've
|
|
|
|
* changed, we will modify the current state as well as the
|
|
|
|
* default one. The full list is: Auto wrap mode, DEC Origin
|
|
|
|
* Mode, BCE, blinking text, character classes.
|
|
|
|
*/
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that those variables are bool and the value assigned is
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
bool reset_wrap, reset_decom, reset_bce, reset_tblink, reset_charclass;
|
2021-05-15 21:05:27 +00:00
|
|
|
bool palette_changed = false;
|
2003-01-12 14:30:02 +00:00
|
|
|
int i;
|
|
|
|
|
2018-10-29 19:57:31 +00:00
|
|
|
reset_wrap = (conf_get_bool(term->conf, CONF_wrap_mode) !=
|
2019-09-08 19:29:00 +00:00
|
|
|
conf_get_bool(conf, CONF_wrap_mode));
|
2018-10-29 19:57:31 +00:00
|
|
|
reset_decom = (conf_get_bool(term->conf, CONF_dec_om) !=
|
2019-09-08 19:29:00 +00:00
|
|
|
conf_get_bool(conf, CONF_dec_om));
|
2018-10-29 19:57:31 +00:00
|
|
|
reset_bce = (conf_get_bool(term->conf, CONF_bce) !=
|
2019-09-08 19:29:00 +00:00
|
|
|
conf_get_bool(conf, CONF_bce));
|
2018-10-29 19:57:31 +00:00
|
|
|
reset_tblink = (conf_get_bool(term->conf, CONF_blinktext) !=
|
2019-09-08 19:29:00 +00:00
|
|
|
conf_get_bool(conf, CONF_blinktext));
|
2018-11-02 19:23:19 +00:00
|
|
|
reset_charclass = false;
|
2011-07-14 18:52:21 +00:00
|
|
|
for (i = 0; i < 256; i++)
|
2019-09-08 19:29:00 +00:00
|
|
|
if (conf_get_int_int(term->conf, CONF_wordness, i) !=
|
|
|
|
conf_get_int_int(conf, CONF_wordness, i))
|
|
|
|
reset_charclass = true;
|
2003-01-12 14:30:02 +00:00
|
|
|
|
2004-10-15 11:14:42 +00:00
|
|
|
/*
|
|
|
|
* If the bidi or shaping settings have changed, flush the bidi
|
|
|
|
* cache completely.
|
|
|
|
*/
|
2019-03-26 21:13:19 +00:00
|
|
|
if (conf_get_bool(term->conf, CONF_no_arabicshaping) !=
|
2019-09-08 19:29:00 +00:00
|
|
|
conf_get_bool(conf, CONF_no_arabicshaping) ||
|
|
|
|
conf_get_bool(term->conf, CONF_no_bidi) !=
|
|
|
|
conf_get_bool(conf, CONF_no_bidi)) {
|
|
|
|
for (i = 0; i < term->bidi_cache_size; i++) {
|
|
|
|
sfree(term->pre_bidi_cache[i].chars);
|
|
|
|
sfree(term->post_bidi_cache[i].chars);
|
|
|
|
term->pre_bidi_cache[i].width = -1;
|
|
|
|
term->pre_bidi_cache[i].chars = NULL;
|
|
|
|
term->post_bidi_cache[i].width = -1;
|
|
|
|
term->post_bidi_cache[i].chars = NULL;
|
|
|
|
}
|
2004-10-15 11:14:42 +00:00
|
|
|
}
|
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
{
|
|
|
|
const char *old_title = conf_get_str(term->conf, CONF_wintitle);
|
|
|
|
const char *new_title = conf_get_str(conf, CONF_wintitle);
|
|
|
|
if (strcmp(old_title, new_title)) {
|
|
|
|
sfree(term->window_title);
|
|
|
|
term->window_title = dupstr(new_title);
|
win_set_[icon_]title: send a codepage along with the string.
While fixing the previous commit I noticed that window titles don't
actually _work_ properly if you change the terminal character set,
because the text accumulated in the OSC string buffer is sent to the
TermWin as raw bytes, with no indication of what character set it
should interpret them as. You might get lucky if you happened to
choose the right charset (in particular, UTF-8 is a common default),
but if you change the charset half way through a run, then there's
certainly no way the frontend will know to interpret two window titles
sent before and after the change in two different charsets.
So, now win_set_title() and win_set_icon_title() both include a
codepage parameter along with the byte string, and it's up to them to
translate the provided window title from that encoding to whatever the
local window system expects to receive.
On Windows, that's wide-string Unicode, so we can just use the
existing dup_mb_to_wc utility function. But in GTK, it's UTF-8, so I
had to write an extra utility function to encode a wide string as
UTF-8.
2021-10-16 12:20:44 +00:00
|
|
|
term->wintitle_codepage = DEFAULT_CODEPAGE;
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
term->win_title_pending = true;
|
|
|
|
term_schedule_update(term);
|
2021-02-07 19:59:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-15 21:05:27 +00:00
|
|
|
/*
|
|
|
|
* Just setting conf is sufficient to cause colour setting changes
|
|
|
|
* to appear on the next ESC]R palette reset. But we should also
|
|
|
|
* check whether any colour settings have been changed, so that
|
|
|
|
* they can be updated immediately if they haven't been overridden
|
|
|
|
* by some escape sequence.
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
for (i = 0; i < CONF_NCOLOURS; i++) {
|
|
|
|
for (j = 0; j < 3; j++)
|
|
|
|
if (conf_get_int_int(term->conf, CONF_colours, i*3+j) !=
|
|
|
|
conf_get_int_int(conf, CONF_colours, i*3+j))
|
|
|
|
break;
|
|
|
|
if (j < 3) {
|
|
|
|
/* Actually enacting the change has to be deferred
|
|
|
|
* until the new conf is installed. */
|
|
|
|
palette_changed = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
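(Aside, not part of the original source: the comparison loop above relies
on CONF_colours being an integer-subscripted key with three channel values
per configurable colour, indexed as colour*3 + channel. The sketch below
reads one such colour out of a Conf under that assumption;
conf_colour_sketch is a hypothetical helper, and the 0/1/2 = red/green/blue
channel order is assumed rather than stated here.)

/* Hypothetical sketch: read configurable colour 'which' from a Conf,
 * assuming subkeys are laid out as which*3 + channel (0=r, 1=g, 2=b). */
static rgb conf_colour_sketch(Conf *conf, int which)
{
    rgb colour;
    colour.r = conf_get_int_int(conf, CONF_colours, which*3 + 0);
    colour.g = conf_get_int_int(conf, CONF_colours, which*3 + 1);
    colour.b = conf_get_int_int(conf, CONF_colours, which*3 + 2);
    return colour;
}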
|
|
|
|
|
2011-07-14 18:52:21 +00:00
|
|
|
conf_free(term->conf);
|
|
|
|
term->conf = conf_copy(conf);
|
2003-01-12 14:30:02 +00:00
|
|
|
|
Don't set term->wrapnext when not in auto-wrapping mode.
A user sent a transcript from a curses-based tool 'ncmpc', which
carefully disables terminal autowrap when printing a character in the
bottom right corner of the display, and then turns it back on again.
After that, it expects that sending the backspace character really
moves the cursor back a space, instead of clearing the wrapnext flag.
But in PuTTY, we set the wrapnext flag even if we're not in wrapping
mode - it just doesn't _do_ anything when the next character is sent.
But it remains set, and still affects backspace. So the display is
corrupted by this change of expectation.
(Specifically, ncmpc is printing a time display [m:ss] in the very
bottom right, so it disables wrap in order to print the final ']'.
Then the next thing it needs to do is to update the low-order digit of
the seconds field, so it sends \b as the simplest way to get to that
character. The effect on the display is that the updated seconds digit
appears where the ] was, instead of overwriting the old seconds digit.)
This is a tradeoff in desirable behaviours. The point of having a
backspace operation cancel the wrapnext flag and not actually move the
cursor is to preserve the invariant that sending 'x', backspace, 'y'
causes the y to overprint the x, even if that happens near the end of
the terminal's line length. In non-wrapping mode that invariant was
bound to break _eventually_, but with this change, it breaks one
character earlier than before. However, I think that's less bad than
breaking the expectations of curses-based full-screen applications,
especially since the _main_ need for that invariant arises from naïve
applications that don't want to have to think about the terminal width
at all - and those applications generally run in _wrapping_ mode,
where it's possible to continue the invariant across multiple lines in
any case.
2024-08-10 09:38:02 +00:00
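(Aside, not part of the original source: a hedged client-side illustration
of the scenario described above. Autowrap is DEC private mode 7 (DECAWM),
toggled with ESC [ ? 7 l / ESC [ ? 7 h, so an ncmpc-style update of the
bottom-right corner looks roughly like the sketch below; it assumes
<stdio.h> and a VT100-style terminal, and bottom_right_update_sketch is a
hypothetical name.)

/* Hypothetical sketch of the full-screen-application behaviour that this
 * change accommodates. */
static void bottom_right_update_sketch(void)
{
    printf("\033[?7l");  /* DECAWM off: printing the last column won't wrap */
    printf("]");         /* draw the ']' in the bottom-right cell */
    printf("\033[?7h");  /* DECAWM back on */
    printf("\b5");       /* backspace, then overwrite the seconds digit */
    fflush(stdout);
}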
|
|
|
if (reset_wrap) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_wrap = term->wrap = conf_get_bool(term->conf, CONF_wrap_mode);
|
2024-08-10 09:38:02 +00:00
|
|
|
if (!term->wrap)
|
|
|
|
term->wrapnext = false;
|
|
|
|
if (!term->alt_wrap)
|
|
|
|
term->alt_wnext = false;
|
|
|
|
}
|
2003-01-12 14:30:02 +00:00
|
|
|
if (reset_decom)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_om = term->dec_om = conf_get_bool(term->conf, CONF_dec_om);
|
2003-04-12 09:05:28 +00:00
|
|
|
if (reset_bce) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->use_bce = conf_get_bool(term->conf, CONF_bce);
|
|
|
|
set_erase_char(term);
|
2003-04-12 09:05:28 +00:00
|
|
|
}
|
2004-11-27 13:20:21 +00:00
|
|
|
if (reset_tblink) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->blink_is_real = conf_get_bool(term->conf, CONF_blinktext);
|
2004-11-27 13:20:21 +00:00
|
|
|
}
|
2003-01-12 14:30:02 +00:00
|
|
|
if (reset_charclass)
|
2019-09-08 19:29:00 +00:00
|
|
|
for (i = 0; i < 256; i++)
|
|
|
|
term->wordness[i] = conf_get_int_int(term->conf, CONF_wordness, i);
|
2003-01-12 14:30:02 +00:00
|
|
|
|
2018-10-29 19:57:31 +00:00
|
|
|
if (conf_get_bool(term->conf, CONF_no_alt_screen))
|
2019-09-08 19:29:00 +00:00
|
|
|
swap_screen(term, 0, false, false);
|
2018-10-29 19:57:31 +00:00
|
|
|
if (conf_get_bool(term->conf, CONF_no_remote_charset)) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->cset_attr[0] = term->cset_attr[1] = CSET_ASCII;
|
|
|
|
term->sco_acs = term->alt_sco_acs = 0;
|
|
|
|
term->utf = false;
|
2002-03-06 23:04:20 +00:00
|
|
|
}
|
2011-07-14 18:52:21 +00:00
|
|
|
if (!conf_get_str(term->conf, CONF_printer)) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term_print_finish(term);
|
2002-03-09 17:59:15 +00:00
|
|
|
}
|
2021-05-15 21:05:27 +00:00
|
|
|
if (palette_changed)
|
|
|
|
term_notify_palette_changed(term);
|
2004-11-27 13:20:21 +00:00
|
|
|
term_schedule_tblink(term);
|
|
|
|
term_schedule_cblink(term);
|
2011-07-14 18:52:21 +00:00
|
|
|
term_copy_stuff_from_conf(term);
|
2021-02-07 19:59:21 +00:00
|
|
|
term_update_raw_mouse_mode(term);
|
2002-03-06 23:04:20 +00:00
|
|
|
}
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Clear the scrollback.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
void term_clrsb(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2006-02-18 22:30:10 +00:00
|
|
|
unsigned char *line;
|
2014-07-24 18:13:16 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scroll forward to the current screen, if we were back in the
|
|
|
|
* scrollback somewhere until now.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
term->disptop = 0;
|
2014-07-24 18:13:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear the actual scrollback.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
while ((line = delpos234(term->scrollback, 0)) != NULL) {
|
2019-09-08 19:29:00 +00:00
|
|
|
sfree(line); /* this is compressed data, not a termline */
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
2014-07-24 18:13:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* When clearing the scrollback, we also truncate any termlines on
|
|
|
|
* the current screen which have remembered data from a previous
|
|
|
|
* larger window size. Rationale: clearing the scrollback is
|
|
|
|
* sometimes done to protect privacy, so the user intention is
|
|
|
|
* specifically that we should not retain evidence of what
|
|
|
|
* previously happened in the terminal, and that ought to include
|
|
|
|
* evidence to the right as well as evidence above.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < term->rows; i++)
|
|
|
|
check_line_size(term, scrlineptr(i));
|
|
|
|
|
2019-07-24 18:00:52 +00:00
|
|
|
/*
|
|
|
|
* That operation has invalidated the selection, if it overlapped
|
|
|
|
* the scrollback at all.
|
|
|
|
*/
|
|
|
|
if (term->selstate != NO_SELECTION && term->selstart.y < 0)
|
|
|
|
deselect(term);
|
|
|
|
|
2014-07-24 18:13:16 +00:00
|
|
|
/*
|
|
|
|
* There are now no lines of real scrollback which can be pulled
|
|
|
|
* back into the screen by a resize, and no lines of the alternate
|
|
|
|
* screen which should be displayed as if part of the scrollback.
|
|
|
|
*/
|
2003-03-07 18:18:38 +00:00
|
|
|
term->tempsblines = 0;
|
2003-03-06 12:51:12 +00:00
|
|
|
term->alt_sblines = 0;
|
2014-07-24 18:13:16 +00:00
|
|
|
|
|
|
|
/*
|
2021-02-07 19:59:21 +00:00
|
|
|
* The scrollbar will need updating to reflect the new state of
|
|
|
|
* the world.
|
2014-07-24 18:13:16 +00:00
|
|
|
*/
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_scrollbar_update_pending = true;
|
|
|
|
term_schedule_update(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
2017-09-30 16:32:32 +00:00
|
|
|
const optionalrgb optionalrgb_none = {0, 0, 0, 0};
|
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
void term_setup_window_titles(Terminal *term, const char *title_hostname)
|
|
|
|
{
|
|
|
|
const char *conf_title = conf_get_str(term->conf, CONF_wintitle);
|
|
|
|
sfree(term->window_title);
|
|
|
|
sfree(term->icon_title);
|
|
|
|
if (*conf_title) {
|
|
|
|
term->window_title = dupstr(conf_title);
|
|
|
|
term->icon_title = dupstr(conf_title);
|
|
|
|
} else {
|
2021-03-08 06:59:26 +00:00
|
|
|
if (title_hostname && *title_hostname)
|
2021-02-07 19:59:20 +00:00
|
|
|
term->window_title = dupcat(title_hostname, " - ", appname);
|
|
|
|
else
|
|
|
|
term->window_title = dupstr(appname);
|
|
|
|
term->icon_title = dupstr(term->window_title);
|
|
|
|
}
|
2021-10-16 12:20:44 +00:00
|
|
|
term->wintitle_codepage = term->icontitle_codepage = DEFAULT_CODEPAGE;
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_title_pending = true;
|
|
|
|
term->win_icon_title_pending = true;
|
2021-02-07 19:59:20 +00:00
|
|
|
}
|
|
|
|
|
Centralise palette setup into terminal.c.
Now terminal.c makes nearly all the decisions about what the colour
palette should actually contain: it does the job of reading the
GUI-configurable colours out of Conf, and also the job of making up
the rest of the xterm-256 palette. The only exception is that TermWin
can provide a method to override some of the default colours, which on
Windows is used to implement the 'Use system colours' config option.
This saves code overall, partly because the front ends don't have to
be able to send palette data back to the Terminal any more (the
Terminal keeps the master copy and can answer palette-query escape
sequences from its own knowledge), and also because now there's only
one copy of the xterm-256 palette setup code (previously gtkwin.c and
window.c each had their own version of it).
In this rewrite, I've also introduced a multi-layered storage system
for the palette data in Terminal. One layer contains the palette
information derived from Conf; the next contains platform overrides
(currently just Windows's 'Use system colours'); the last one contains
overrides set by escape sequences in the middle of the session. The
topmost two layers can each _conditionally_ override the ones below.
As a result, if a server-side application manually resets (say) the
default fg and bg colours in mid-session to something that works well
in a particular application, those changes won't be wiped out by a
change in the Windows system colours or the Conf, which they would
have been before. Instead, changes in Conf or the system colours alter
the lower layers of the structure, but then when palette_rebuild is
called, the upper layer continues to override them, until a palette
reset (ESC]R) or terminal reset (e.g. ESC c) removes those upper-layer
changes. This seems like a more consistent strategy, in that the same
set of configuration settings will produce the same end result
regardless of what order they were applied in.
The palette-related methods in TermWin have had a total rework.
palette_get and palette_reset are both gone; palette_set can now set a
contiguous range of colours in one go; and the new
palette_get_overrides replaces window.c's old systopalette().
2021-02-07 19:59:21 +00:00
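(Aside, not part of the original source: under the layering described
above, an upper layer overrides a colour by marking it present in its own
subpalette and asking for a rebuild, roughly as sketched below. The layer
name SUBPAL_SESSION is an assumption (only SUBPAL_CONF is mentioned
elsewhere in this file), and set_session_colour_sketch is a hypothetical
helper.)

static void palette_rebuild(Terminal *term);   /* defined just below */

/* Hypothetical sketch: an escape-sequence handler overriding one palette
 * entry in the topmost subpalette layer, then triggering a rebuild. */
static void set_session_colour_sketch(Terminal *term, unsigned i, rgb value)
{
    term->subpalettes[SUBPAL_SESSION].values[i] = value;  /* assumed layer */
    term->subpalettes[SUBPAL_SESSION].present[i] = true;
    palette_rebuild(term);
}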
|
|
|
static void palette_rebuild(Terminal *term)
|
|
|
|
{
|
|
|
|
unsigned min_changed = OSC4_NCOLOURS, max_changed = 0;
|
|
|
|
|
2021-06-12 23:18:42 +00:00
|
|
|
if (term->win_palette_pending) {
|
|
|
|
/* Possibly extend existing range. */
|
|
|
|
min_changed = term->win_palette_pending_min;
|
|
|
|
max_changed = term->win_palette_pending_limit - 1;
|
|
|
|
} else {
|
|
|
|
/* Start with empty range. */
|
|
|
|
min_changed = OSC4_NCOLOURS;
|
|
|
|
max_changed = 0;
|
|
|
|
}
|
|
|
|
|
2021-02-07 19:59:21 +00:00

    for (unsigned i = 0; i < OSC4_NCOLOURS; i++) {
        rgb new_value;
        bool found = false;

        for (unsigned j = lenof(term->subpalettes); j-- > 0 ;) {
            if (term->subpalettes[j].present[i]) {
                new_value = term->subpalettes[j].values[i];
                found = true;
                break;
            }
        }

        assert(found);    /* we expect SUBPAL_CONF to always be set */

        if (new_value.r != term->palette[i].r ||
            new_value.g != term->palette[i].g ||
            new_value.b != term->palette[i].b) {
            term->palette[i] = new_value;
            if (min_changed > i)
                min_changed = i;
            if (max_changed < i)
                max_changed = i;
        }
    }

    if (min_changed <= max_changed) {
        /*
         * At least one colour changed (or we had an update scheduled
         * already). Schedule a redraw event to pass the result back
         * to the TermWin. This also requires invalidating the rest
         * of the window, because usually all the text will need
         * redrawing in the new colours.
         *
         * (If there was an update pending and this palette rebuild
         * didn't actually change anything, we'll harmlessly reinforce
         * the existing update request.)
         */
        term->win_palette_pending = true;
        term->win_palette_pending_min = min_changed;
        term->win_palette_pending_limit = max_changed + 1;
        term_invalidate(term);
    }
}

/*
 * Rebuild the palette from configuration and platform colours. If
 * 'keep_overrides' is set, any escape-sequence-specified overrides
 * will remain in place.
 */
static void palette_reset(Terminal *term, bool keep_overrides)
{
    for (unsigned i = 0; i < OSC4_NCOLOURS; i++)
        term->subpalettes[SUBPAL_CONF].present[i] = true;

    /*
     * Copy all the palette information out of the Conf.
     */
    for (unsigned i = 0; i < CONF_NCOLOURS; i++) {
        rgb *col = &term->subpalettes[SUBPAL_CONF].values[
            colour_indices_conf_to_osc4[i]];
        col->r = conf_get_int_int(term->conf, CONF_colours, i*3+0);
        col->g = conf_get_int_int(term->conf, CONF_colours, i*3+1);
        col->b = conf_get_int_int(term->conf, CONF_colours, i*3+2);
    }

    /*
     * Directly invent the rest of the xterm-256 colours.
     */
    for (unsigned i = 0; i < 216; i++) {
        rgb *col = &term->subpalettes[SUBPAL_CONF].values[i + 16];
        int r = i / 36, g = (i / 6) % 6, b = i % 6;
        col->r = r ? r * 40 + 55 : 0;
        col->g = g ? g * 40 + 55 : 0;
        col->b = b ? b * 40 + 55 : 0;
    }
    for (unsigned i = 0; i < 24; i++) {
        rgb *col = &term->subpalettes[SUBPAL_CONF].values[i + 232];
        int shade = i * 10 + 8;
        col->r = col->g = col->b = shade;
    }

    /*
     * Re-fetch any OS-local overrides.
     */
    for (unsigned i = 0; i < OSC4_NCOLOURS; i++)
        term->subpalettes[SUBPAL_PLATFORM].present[i] = false;
    win_palette_get_overrides(term->win, term);

    if (!keep_overrides) {
        /*
         * Get rid of all escape-sequence configuration.
         */
        for (unsigned i = 0; i < OSC4_NCOLOURS; i++)
            term->subpalettes[SUBPAL_SESSION].present[i] = false;
    }

    /*
     * Rebuild the composite palette.
     */
    palette_rebuild(term);
}
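
As a spot check on the two loops above, the same formulas can be read back as a single mapping from an xterm-256 index to the colour they produce. The block below is purely illustrative and restates the arithmetic already shown: for example, index 196 maps to (255, 0, 0) and index 232, the first grey-ramp entry, to (8, 8, 8).

/* Illustrative only: recompute the colour the loops above assign to an
 * xterm-256 index in the range 16..255 (ANSI colours 0..15 come from
 * the Conf instead). */
static rgb example_xterm256_colour(unsigned idx)
{
    rgb col;
    if (idx >= 232) {                       /* 24-step grey ramp */
        int shade = (idx - 232) * 10 + 8;   /* 8, 18, ..., 238 */
        col.r = col.g = col.b = shade;
    } else {                                /* 6x6x6 colour cube */
        unsigned i = idx - 16;
        int r = i / 36, g = (i / 6) % 6, b = i % 6;
        col.r = r ? r * 40 + 55 : 0;        /* 0, 95, 135, 175, 215, 255 */
        col.g = g ? g * 40 + 55 : 0;
        col.b = b ? b * 40 + 55 : 0;
    }
    return col;
}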

void term_palette_override(Terminal *term, unsigned osc4_index, rgb rgb)
{
    /*
     * We never expect to be called except as re-entry from our own
     * call to win_palette_get_overrides above, so we need not mess
     * about calling palette_rebuild.
     */
    term->subpalettes[SUBPAL_PLATFORM].present[osc4_index] = true;
    term->subpalettes[SUBPAL_PLATFORM].values[osc4_index] = rgb;
}
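
For context on how the SUBPAL_PLATFORM layer gets filled in: palette_reset clears it and then calls win_palette_get_overrides, and the platform's TermWin implementation is expected to answer by calling term_palette_override once per colour it wants to pin (on Windows this backs 'Use system colours'). The sketch below is hypothetical: the function name, the hard-coded colour and the choice of index 0 are placeholders, not the real Windows code.

/* Hypothetical platform-side provider of palette overrides. */
static void example_palette_get_overrides(TermWin *tw, Terminal *term)
{
    (void)tw;   /* a real implementation would query the window system */

    /* Suppose the OS reports a preferred shade for ANSI colour 0... */
    rgb sys_black = { .r = 12, .g = 12, .b = 12 };

    /* ...then pin it in the platform subpalette; the terminal takes
     * care of the subsequent palette_rebuild itself. */
    term_palette_override(term, 0, sys_black);
}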

/*
 * Initialise the terminal.
 */
Terminal *term_init(Conf *myconf, struct unicode_data *ucsdata, TermWin *win)
{
    Terminal *term;

    /*
     * Allocate a new Terminal structure and initialise the fields
     * that need it.
     */
    term = snew(Terminal);
    memset(term, 0, sizeof(Terminal));
    term->win = win;
    term->ucsdata = ucsdata;
    term->conf = conf_copy(myconf);
    term->compatibility_level = TM_PUTTY;
    strcpy(term->id_string, "\033[?6c");
    bufchain_init(&term->inbuf);
    bufchain_init(&term->printer_buf);
    term->has_focus = true;
    term->termstate = TOPLEVEL;
    term->selstate = NO_SELECTION;
    term->answerback = strbuf_new();

    term_copy_stuff_from_conf(term);

    term->dispcursx = term->dispcursy = -1;
    deselect(term);
    term->rows = term->cols = -1;
    power_on(term, true);
    term->attr_mask = 0xffffffff;

    /* FULL-TERMCHAR */
    term->basic_erase_char.chr = CSET_ASCII | ' ';
    term->basic_erase_char.attr = ATTR_DEFAULT;
    term->basic_erase_char.truecolour.fg = optionalrgb_none;
    term->basic_erase_char.truecolour.bg = optionalrgb_none;
    term->erase_char = term->basic_erase_char;

    /* TermWin implementations will typically extend these with
     * clipboard ids they know about */
    term->mouse_select_clipboards[0] = CLIP_LOCAL;
    term->n_mouse_select_clipboards = 1;
    term->mouse_paste_clipboard = CLIP_NULL;

    term->trusted = true;

    term->window_title = dupstr("");
    term->icon_title = dupstr("");
    term->wintitle_codepage = term->icontitle_codepage = DEFAULT_CODEPAGE;

    term->win_resize_pending = WIN_RESIZE_NO;

    term->bidi_ctx = bidi_new_context();

    palette_reset(term, false);

    return term;
}
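
A hedged sketch of how a front end might drive this constructor. The term_size argument order (rows, columns, scrollback lines) is my assumption rather than something quoted from this file; the point is simply that the terminal starts with rows == cols == -1 and needs an initial geometry before it can usefully process data.

/* Hypothetical front-end start-up sequence (details assumed). */
void example_frontend_setup(Conf *conf, struct unicode_data *ucsdata,
                            TermWin *win)
{
    Terminal *term = term_init(conf, ucsdata, win);

    /* Give the terminal an initial geometry; argument order assumed. */
    term_size(term, 24, 80, 2000);

    /* From here the front end feeds server output into the terminal
     * and eventually disposes of it with term_free. */
}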

void term_free(Terminal *term)
{
Build option to disable scrollback compression.
This was requested by a downstream of the code, who wanted to change
the time/space tradeoff in the terminal. I currently have no plans to
change this setting for upstream PuTTY, although there is a cmake
option for it just to make testing it easy.
To avoid sprinkling ifdefs over the whole terminal code, the strategy
is to keep the separate type 'compressed_scrollback_line', and turn it
into a typedef for a 'termline *'. So compressline() becomes almost
trivial, and decompressline() even more so.
Memory management is the fiddly part. To make this work sensibly on
both sides, I've broken up each of compressline() and decompressline()
into two versions, one of which takes ownership of (and logically
speaking frees) its input, and the other doesn't. So at call sites
where a function was followed by a free, it's now calling the
'and_free' version of the function, and where the input object was
reused afterwards, it's calling the 'no_free' version. This means that
in different branches of the #if, I can make one function call the
other or vice versa, and no call site is stuck with having to do
things in a more roundabout way than necessary.
The freeing of the _return_ value from decompressline() is handled for
us, because termlines already have a 'temporary' flag which is set
when they're returned from the decompressor, and anyone receiving a
termline from lineptr() calls unlineptr() when they're finished with
it, which will _conditionally_ free it, depending on that 'temporary'
flag. So in the new mode, 'temporary' is never set at all, and all
those unlineptr() calls do nothing.
However, we also still need to free compressed lines properly when
they're actually being thrown away (scrolled off the top of the
scrollback, or cleaned up in term_free), and for that, I've made a new
special-purpose free_compressed_line() function.
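A rough sketch of the shape this gives the disabled-compression branch; the
option macro name and the exact typedef are assumptions, not the real cmake
option or the real terminal.h contents:

#ifdef NO_SCROLLBACK_COMPRESSION   /* assumed option name */
/* With compression disabled, 'compression' is the identity: a compressed
 * line is just a termline, and the _and_free variants merely transfer
 * ownership of it. */
typedef termline compressed_scrollback_line;

static inline compressed_scrollback_line *compressline_and_free(termline *line)
{
    return line;
}

static inline termline *decompressline_and_free(compressed_scrollback_line *cline)
{
    return cline;   /* never sets 'temporary', so unlineptr() does nothing */
}

static inline void free_compressed_line(compressed_scrollback_line *cline)
{
    freetermline(cline);
}
#endif /* the genuinely compressing versions live in the other branch */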
2022-11-20 10:55:33 +00:00
|
|
|
compressed_scrollback_line *cline;
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
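For orientation, the before/after shape of a character cell, sketched with
illustrative field names rather than the exact terminal.h definition:

/* Before: one packed machine word per cell. */
typedef unsigned long old_cell;        /* 16 bits of Unicode + 16 attribute bits */

/* After: a structure per cell, with room to grow. */
typedef struct termchar_sketch {
    unsigned long chr;                 /* full 32-bit Unicode code point */
    unsigned long attr;                /* 32 attribute bits */
} termchar_sketch;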
2004-10-13 11:50:16 +00:00
|
|
|
termline *line;
|
2003-01-15 23:30:21 +00:00
|
|
|
struct beeptime *beep;
|
2004-05-22 10:36:50 +00:00
|
|
|
int i;
|
2003-01-15 23:30:21 +00:00
|
|
|
|

2022-11-20 10:55:33 +00:00
|
|
|
while ((cline = delpos234(term->scrollback, 0)) != NULL)
|
|
|
|
free_compressed_line(cline);
|
2003-01-15 23:30:21 +00:00
|
|
|
freetree234(term->scrollback);
|
|
|
|
while ((line = delpos234(term->screen, 0)) != NULL)
|
2019-09-08 19:29:00 +00:00
|
|
|
freetermline(line);
|
2003-01-15 23:30:21 +00:00
|
|
|
freetree234(term->screen);
|
|
|
|
while ((line = delpos234(term->alt_screen, 0)) != NULL)
|
2019-09-08 19:29:00 +00:00
|
|
|
freetermline(line);
|
2003-01-15 23:30:21 +00:00
|
|
|
freetree234(term->alt_screen);
|
2004-10-13 11:50:16 +00:00
|
|
|
if (term->disptext) {
|
2019-09-08 19:29:00 +00:00
|
|
|
for (i = 0; i < term->rows; i++)
|
|
|
|
freetermline(term->disptext[i]);
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
2003-01-15 23:30:21 +00:00
|
|
|
sfree(term->disptext);
|
|
|
|
while (term->beephead) {
|
2019-09-08 19:29:00 +00:00
|
|
|
beep = term->beephead;
|
|
|
|
term->beephead = beep->next;
|
|
|
|
sfree(beep);
|
2003-01-15 23:30:21 +00:00
|
|
|
}
|
|
|
|
bufchain_clear(&term->inbuf);
|
2022-12-28 15:32:24 +00:00
|
|
|
if (term->print_job)
|
2019-09-08 19:29:00 +00:00
|
|
|
printer_finish_job(term->print_job);
|
2003-01-15 23:30:21 +00:00
|
|
|
bufchain_clear(&term->printer_buf);
|
|
|
|
sfree(term->paste_buffer);
|
2004-05-22 10:36:50 +00:00
|
|
|
sfree(term->ltemp);
|
|
|
|
sfree(term->wcFrom);
|
|
|
|
sfree(term->wcTo);
|
2022-09-13 14:00:26 +00:00
|
|
|
strbuf_free(term->answerback);
|
2004-05-22 10:36:50 +00:00
|
|
|
|
|
|
|
for (i = 0; i < term->bidi_cache_size; i++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
sfree(term->pre_bidi_cache[i].chars);
|
|
|
|
sfree(term->post_bidi_cache[i].chars);
|
2012-08-30 18:48:08 +00:00
|
|
|
sfree(term->post_bidi_cache[i].forward);
|
|
|
|
sfree(term->post_bidi_cache[i].backward);
|
2004-05-22 10:36:50 +00:00
|
|
|
}
|
|
|
|
sfree(term->pre_bidi_cache);
|
|
|
|
sfree(term->post_bidi_cache);
|
|
|
|
|
2012-08-30 18:48:08 +00:00
|
|
|
sfree(term->tabs);
|
|
|
|
|
2004-11-27 13:20:21 +00:00
|
|
|
expire_timer_context(term);
|
2019-04-20 07:20:34 +00:00
|
|
|
delete_callbacks_for_context(term);
|
2004-11-27 13:20:21 +00:00
|
|
|
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
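The resulting call pattern looks roughly like this sketch; the accessor names
follow my reading of conf.h, but treat the exact signatures as assumptions:

#include "putty.h"   /* Conf, CONF_* keys, conf_* accessors */

static void conf_usage_sketch(void)
{
    Conf *conf = conf_new();

    conf_set_str(conf, CONF_host, "example.org");   /* simple string-valued key */
    conf_set_int(conf, CONF_port, 22);              /* simple integer-valued key */
    /* Two-part keys such as CONF_environmt take an extra subkey argument. */

    Conf *copy = conf_copy(conf);   /* replaces the old whole-struct copy */
    int port = conf_get_int(copy, CONF_port);
    (void)port;

    conf_free(copy);
    conf_free(conf);
}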
2011-07-14 18:52:21 +00:00
|
|
|
conf_free(term->conf);
|
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
sfree(term->window_title);
|
|
|
|
sfree(term->icon_title);
|
|
|
|
|
2021-10-10 13:40:51 +00:00
|
|
|
bidi_free_context(term->bidi_ctx);
|
|
|
|
|
New centralised version of local line editing.
This takes over from both the implementation in ldisc.c and the one in
term_get_userpass_input, which were imperfectly duplicating each
other's functionality. The new version should be more consistent
between the two already, and also, it means further improvements can
now be made in just one place.
In the course of this, I've restructured the inside of ldisc.c by
moving the input_queue bufchain to the other side of the translation
code in ldisc_send. Previously, ldisc_send received a string, an
optional 'dedicated key' indication (bodgily signalled by a negative
length) and an 'interactive' flag, translated that somehow into a
combination of raw backend output and specials, and saved the latter
in input_queue. Now it saves the original (string, dedicated flag,
interactive flag) data in input_queue, and doesn't do the translation
until the data is pulled back _out_ of the queue. That's because the
new line editing system expects to receive something much closer to
the original data format.
The term_get_userpass_input system is also substantially restructured.
Instead of ldisc.c handing each individual keystroke to terminal.c so
that it can do line editing on it, terminal.c now just gives the Ldisc
a pointer to its instance of the new TermLineEditor object - and then
ldisc.c can put keystrokes straight into that, in the same way it
would put them into its own TermLineEditor, without having to go via
terminal.c at all. So the term_get_userpass_input edifice is only
called back when the line editor actually delivers the answer to a
username or password prompt.
(I considered not _even_ having a separate TermLineEditor for password
prompts, and just letting ldisc.c use its own. But the problem is that
some of the behaviour differences between the two line editors are
deliberate, for example the use of ^D to signal 'abort this prompt',
and the use of Escape as an alternative line-clearing command. So
TermLineEditor has a flags word that allows ldisc and terminal to set
it up differently. Also this lets me give the two TermLineEditors a
different vtable of callback functions, which is a convenient way for
terminal.c to get notified when a prompt has been answered.)
The new line editor still passes all the tests I wrote for the old
one. But it already has a couple of important improvements, both in
the area of UTF-8 handling:
Firstly, when we display a UTF-8 character on the terminal, we check
with the terminal how many character cells it occupied, and then if
the user deletes it again from the editing buffer, we can emit the
right number of backspace-space-backspace sequences. (The old ldisc
line editor incorrectly assumed all Unicode characters had terminal
width 1, partly because its buffer was byte- rather than character-
oriented and so it was more than enough work just finding where the
character _start_ was.)
Secondly, terminal.c's userpass line editor would never emit a byte in
the 80-BF range to the terminal at all, which meant that nontrivial
UTF-8 characters always came out as U+FFFD blobs!
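The first of those improvements boils down to erasing by cell count rather
than by character count; a minimal sketch of the idea, with illustrative names
rather than the real TermLineEditor internals:

#include <stddef.h>

/* 'emit' stands in for whatever sends bytes back to the terminal. */
static void erase_last_char(int cells_occupied,
                            void (*emit)(const char *buf, size_t len))
{
    /* One backspace-space-backspace per terminal cell the character
     * occupied, instead of assuming every character was one cell wide. */
    for (int i = 0; i < cells_occupied; i++)
        emit("\b \b", 3);
}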
2023-03-04 12:56:01 +00:00
|
|
|
/* In case a term_userpass_state is still around */
|
|
|
|
if (term->userpass_state)
|
|
|
|
term_userpass_state_free(term->userpass_state);
|
|
|
|
|
2003-01-15 23:30:21 +00:00
|
|
|
sfree(term);
|
|
|
|
}
|
|
|
|
|
2019-03-10 14:42:11 +00:00
|
|
|
void term_set_trust_status(Terminal *term, bool trusted)
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
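A simplified reconstruction of the guard described above; the per-line flag
name and the line-wiping helper are assumptions, not the exact code:

static void check_trust_status_sketch(Terminal *term, termline *line)
{
    if (line->trusted != term->trusted) {
        /* Don't let output written with one trust status overwrite a line
         * written with the other: wipe the line before reusing it. */
        wipe_line_completely(term, line);   /* assumed helper */
        line->trusted = term->trusted;
    }
}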
2019-03-10 14:39:28 +00:00
|
|
|
{
|
2019-03-10 14:42:11 +00:00
|
|
|
term->trusted = trusted;
|
2019-03-10 14:39:28 +00:00
|
|
|
}
|
|
|
|
|
2019-04-04 19:17:24 +00:00
|
|
|
void term_get_cursor_position(Terminal *term, int *x, int *y)
|
|
|
|
{
|
|
|
|
*x = term->curs.x;
|
|
|
|
*y = term->curs.y;
|
|
|
|
}
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Set up the terminal for a given size.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
void term_size(Terminal *term, int newrows, int newcols, int newsavelines)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2001-05-13 14:02:28 +00:00
|
|
|
tree234 *newalt;
|
2004-10-13 11:50:16 +00:00
|
|
|
termline **newdisp, *line;
|
2004-10-13 13:35:29 +00:00
|
|
|
int i, j, oldrows = term->rows;
|
2001-04-28 15:32:25 +00:00
|
|
|
int sblen;
|
2002-10-22 16:11:33 +00:00
|
|
|
int save_alt_which = term->alt_which;
|
2000-03-11 14:10:10 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (newrows == term->rows && newcols == term->cols &&
|
2019-09-08 19:29:00 +00:00
|
|
|
newsavelines == term->savelines)
|
|
|
|
return; /* nothing to do */
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2006-01-10 20:02:22 +00:00
|
|
|
/* Behave sensibly if we're given zero (or negative) rows/cols */
|
|
|
|
|
|
|
|
if (newrows < 1) newrows = 1;
|
|
|
|
if (newcols < 1) newcols = 1;
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
deselect(term);
|
2018-10-29 19:50:29 +00:00
|
|
|
swap_screen(term, 0, false, false);
|
2000-03-11 14:10:10 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
term->alt_t = term->marg_t = 0;
|
|
|
|
term->alt_b = term->marg_b = newrows - 1;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->rows == -1) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->scrollback = newtree234(NULL);
|
|
|
|
term->screen = newtree234(NULL);
|
|
|
|
term->tempsblines = 0;
|
|
|
|
term->rows = 0;
|
2001-04-28 15:32:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Resize the screen and scrollback. We only need to shift
|
|
|
|
* lines around within our data structures, because lineptr()
|
|
|
|
* will take care of resizing each individual line if
|
|
|
|
* necessary. So:
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2003-03-07 18:18:38 +00:00
|
|
|
* - If the new screen is longer, we shunt lines in from temporary
|
|
|
|
* scrollback if possible, otherwise we add new blank lines at
|
|
|
|
* the bottom.
|
|
|
|
*
|
|
|
|
* - If the new screen is shorter, we remove any blank lines at
|
|
|
|
* the bottom if possible, otherwise shunt lines above the cursor
|
|
|
|
* to scrollback if possible, otherwise delete lines below the
|
|
|
|
* cursor.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2001-04-28 15:32:25 +00:00
|
|
|
* - Then, if the new scrollback length is less than the
|
|
|
|
* amount of scrollback we actually have, we must throw some
|
|
|
|
* away.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
sblen = count234(term->scrollback);
|
2001-05-03 10:10:53 +00:00
|
|
|
/* Do this loop to expand the screen if newrows > rows */
|
2003-03-07 18:18:38 +00:00
|
|
|
assert(term->rows == count234(term->screen));
|
|
|
|
while (term->rows < newrows) {
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->tempsblines > 0) {
|
|
|
|
compressed_scrollback_line *cline;
|
|
|
|
/* Insert a line from the scrollback at the top of the screen. */
|
|
|
|
assert(sblen >= term->tempsblines);
|
|
|
|
cline = delpos234(term->scrollback, --sblen);
|
2022-11-20 10:55:33 +00:00
|
|
|
line = decompressline_and_free(cline);
|
2019-09-08 19:29:00 +00:00
|
|
|
line->temporary = false; /* reconstituted line is now real */
|
|
|
|
term->tempsblines -= 1;
|
|
|
|
addpos234(term->screen, line, 0);
|
|
|
|
term->curs.y += 1;
|
|
|
|
term->savecurs.y += 1;
|
|
|
|
term->alt_y += 1;
|
|
|
|
term->alt_savecurs.y += 1;
|
|
|
|
} else {
|
|
|
|
/* Add a new blank line at the bottom of the screen. */
|
|
|
|
line = newtermline(term, newcols, false);
|
|
|
|
addpos234(term->screen, line, count234(term->screen));
|
|
|
|
}
|
|
|
|
term->rows += 1;
|
2001-05-03 10:10:53 +00:00
|
|
|
}
|
|
|
|
/* Do this loop to shrink the screen if newrows < rows */
|
2003-03-07 18:18:38 +00:00
|
|
|
while (term->rows > newrows) {
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->curs.y < term->rows - 1) {
|
|
|
|
/* delete bottom row, unless it contains the cursor */
|
2012-08-30 18:48:08 +00:00
|
|
|
line = delpos234(term->screen, term->rows - 1);
|
2019-02-26 18:36:20 +00:00
|
|
|
freetermline(line);
|
2019-09-08 19:29:00 +00:00
|
|
|
} else {
|
|
|
|
/* push top row to scrollback */
|
|
|
|
line = delpos234(term->screen, 0);
|
2022-11-20 10:55:33 +00:00
|
|
|
addpos234(term->scrollback, compressline_and_free(line), sblen++);
|
2019-09-08 19:29:00 +00:00
|
|
|
term->tempsblines += 1;
|
|
|
|
term->curs.y -= 1;
|
|
|
|
term->savecurs.y -= 1;
|
|
|
|
term->alt_y -= 1;
|
|
|
|
term->alt_savecurs.y -= 1;
|
|
|
|
}
|
|
|
|
term->rows -= 1;
|
2001-04-28 15:32:25 +00:00
|
|
|
}
|
2003-03-07 18:18:38 +00:00
|
|
|
assert(term->rows == newrows);
|
2002-10-22 16:11:33 +00:00
|
|
|
assert(count234(term->screen) == newrows);
|
2003-03-07 18:18:38 +00:00
|
|
|
|
|
|
|
/* Delete any excess lines from the scrollback. */
|
2001-04-28 15:32:25 +00:00
|
|
|
while (sblen > newsavelines) {
|
2019-09-08 19:29:00 +00:00
|
|
|
line = delpos234(term->scrollback, 0);
|
|
|
|
sfree(line);
|
|
|
|
sblen--;
|
2000-07-26 12:13:51 +00:00
|
|
|
}
|
2003-03-07 18:18:38 +00:00
|
|
|
if (sblen < term->tempsblines)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->tempsblines = sblen;
|
2002-10-22 16:11:33 +00:00
|
|
|
assert(count234(term->scrollback) <= newsavelines);
|
2003-03-07 18:18:38 +00:00
|
|
|
assert(count234(term->scrollback) >= term->tempsblines);
|
2002-10-22 16:11:33 +00:00
|
|
|
term->disptop = 0;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2003-03-07 18:18:38 +00:00
|
|
|
/* Make a new displayed text buffer. */
|
2004-10-13 11:50:16 +00:00
|
|
|
newdisp = snewn(newrows, termline *);
|
|
|
|
for (i = 0; i < newrows; i++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
newdisp[i] = newtermline(term, newcols, false);
|
|
|
|
for (j = 0; j < newcols; j++)
|
|
|
|
newdisp[i]->chars[j].attr = ATTR_INVALID;
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
|
|
|
if (term->disptext) {
|
2019-09-08 19:29:00 +00:00
|
|
|
for (i = 0; i < oldrows; i++)
|
|
|
|
freetermline(term->disptext[i]);
|
2004-10-13 11:50:16 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
sfree(term->disptext);
|
|
|
|
term->disptext = newdisp;
|
2004-10-13 11:50:16 +00:00
|
|
|
term->dispcursx = term->dispcursy = -1;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2003-03-07 18:18:38 +00:00
|
|
|
/* Make a new alternate screen. */
|
2001-04-16 21:25:13 +00:00
|
|
|
newalt = newtree234(NULL);
|
2001-05-06 14:35:20 +00:00
|
|
|
for (i = 0; i < newrows; i++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
line = newtermline(term, newcols, true);
|
|
|
|
addpos234(newalt, line, i);
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->alt_screen) {
|
2019-09-08 19:29:00 +00:00
|
|
|
while (NULL != (line = delpos234(term->alt_screen, 0)))
|
|
|
|
freetermline(line);
|
|
|
|
freetree234(term->alt_screen);
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
term->alt_screen = newalt;
|
2003-03-06 12:51:12 +00:00
|
|
|
term->alt_sblines = 0;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2003-03-29 16:14:26 +00:00
|
|
|
term->tabs = sresize(term->tabs, newcols, unsigned char);
|
1999-01-08 13:02:13 +00:00
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
int i;
|
|
|
|
for (i = (term->cols > 0 ? term->cols : 0); i < newcols; i++)
|
|
|
|
term->tabs[i] = (i % 8 == 0 ? true : false);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
2003-03-07 18:18:38 +00:00
|
|
|
/* Check that the cursor positions are still valid. */
|
|
|
|
if (term->savecurs.y < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->savecurs.y = 0;
|
2003-03-07 18:18:38 +00:00
|
|
|
if (term->savecurs.y >= newrows)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->savecurs.y = newrows - 1;
|
Fix a cursor positioning infelicity.
The scenario: I start a small, say 80x24, pterm. I do some work in
it, generating plenty of scrollback, and eventually I `less' a file.
`less' switches to the alt screen. Then I want more vertical space
to look at the file, so I enlarge the window to more like 80x60.
When I quit `less' and switch back to the primary screen, some
scrollback has been pulled down into the screen, as expected - but
the saved _cursor position_ is still at line 24, not at the bottom
of the new terminal where the prompt it goes with has moved to.
Solution: term_size() should adjust the alt-screen saved cursor
positions as well as the normal cursor position.
(Curiously, the problem doesn't happen on my home Debian box, even
without this fix. It happens on my RH9 box at work, though.)
[originally from svn r7911]
2008-03-07 18:30:37 +00:00
|
|
|
if (term->savecurs.x >= newcols)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->savecurs.x = newcols - 1;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_savecurs.y < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_savecurs.y = 0;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_savecurs.y >= newrows)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_savecurs.y = newrows - 1;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_savecurs.x >= newcols)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_savecurs.x = newcols - 1;
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->curs.y < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curs.y = 0;
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->curs.y >= newrows)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curs.y = newrows - 1;
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->curs.x >= newcols)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curs.x = newcols - 1;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_y < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_y = 0;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_y >= newrows)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_y = newrows - 1;
|
2008-03-07 18:30:37 +00:00
|
|
|
if (term->alt_x >= newcols)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_x = newcols - 1;
|
2002-10-22 16:11:33 +00:00
|
|
|
term->alt_x = term->alt_y = 0;
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just an
integer macro, but now that those variables are bool and are assigned
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
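In miniature, the chained-assignment case that had to change (the warning is
the gcc -Wparentheses one quoted above):

#include <stdbool.h>

static void chained_assignment_example(void)
{
    bool a, b, c;
    /* a = b = c = true;    -- now warns: "suggest parentheses around
     *                         assignment used as truth value" */
    a = true;
    b = true;
    c = true;
    (void)a; (void)b; (void)c;
}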
2018-11-02 19:23:19 +00:00
|
|
|
term->wrapnext = false;
|
|
|
|
term->alt_wnext = false;
|
2002-10-22 16:11:33 +00:00
|
|
|
|
|
|
|
term->rows = newrows;
|
|
|
|
term->cols = newcols;
|
|
|
|
term->savelines = newsavelines;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2018-10-29 19:50:29 +00:00
|
|
|
swap_screen(term, save_alt_which, false, false);
|
2000-03-11 14:10:10 +00:00
|
|
|
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
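The requesting side of that pattern is the pair of lines just below; the
consuming side, inside term_update, looks roughly like this sketch (the
scrollbar-refresh helper name is an assumption):

static void term_update_sketch(Terminal *term)
{
    /* Deferred window changes are enacted here, alongside the deferred
     * text redraw, so they happen in the order they were requested. */
    if (term->win_scrollbar_update_pending) {
        term->win_scrollbar_update_pending = false;
        update_sbar(term);   /* assumed name for the scrollbar refresh */
    }
}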
2021-02-07 19:59:21 +00:00
|
|
|
term->win_scrollbar_update_pending = true;
|
|
|
|
term_schedule_update(term);
|
2018-09-11 15:23:38 +00:00
|
|
|
if (term->backend)
|
|
|
|
backend_size(term->backend, term->cols, term->rows);
|
2002-10-25 11:30:33 +00:00
|
|
|
}
|
|
|
|
|
Move window resize timeouts into the GTK frontend.
In the changes around commit 420fe75552afa94, I made the terminal
suspend output processing while it waited for a term_size() callback
in response to a resize request. Because on X11 there are unusual
circumstances in which you never receive that callback, I also added a
last-ditch 5-second timeout, so that eventually we'll resume terminal
output processing regardless.
But the timeout lives in terminal.c, in the cross-platform code. This
is pointless on Windows (where resize processing is synchronous, so we
always finish it before the timer code next gets called anyway), but I
decided it was easier to keep the whole mechanism in terminal.c in the
absence of a good reason not to.
Now I've found that reason. We _also_ generate window resizes locally
to the GTK front end, in response to the key combinations that change
the font size, and _those_ still have an asynchrony problem.
So, to begin with, I'm refactoring the request_resize system so that
now there's an explicit callback from the frontend to the terminal to
say 'Your resize request has now been processed, whether or not you've
received a term_size() call'. On Windows, this simplifies matters
greatly because we always know exactly when to call that, and don't
have to keep a 'have we called term_size() already?' flag. On GTK, the
timing complexity previously in terminal.c has moved into window.c.
No functional change (I hope). The payoff will be in the next commit.
2022-05-12 17:16:56 +00:00
|
|
|
void term_resize_request_completed(Terminal *term)
|
|
|
|
{
|
|
|
|
assert(term->win_resize_pending == WIN_RESIZE_AWAIT_REPLY);
|
|
|
|
term->win_resize_pending = WIN_RESIZE_NO;
|
|
|
|
queue_toplevel_callback(term_out_cb, term);
|
|
|
|
}
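
term_resize_request_completed() is the front end's half of the handshake described above: output processing stays suspended while a resize request is outstanding, and resumes when the front end reports completion. A toy model of that handshake, with invented names and no real window system, might look like this:

    #include <assert.h>
    #include <stdio.h>

    enum resize_state { RESIZE_NO, RESIZE_AWAIT_REPLY };

    struct toy_term {
        enum resize_state resize_pending;
    };

    static void toy_process_output(struct toy_term *t)
    {
        if (t->resize_pending != RESIZE_NO) {
            puts("resize outstanding: deferring output processing");
            return;
        }
        puts("processing buffered terminal output");
    }

    static void toy_request_resize(struct toy_term *t)
    {
        t->resize_pending = RESIZE_AWAIT_REPLY;
        /* the real code would now ask the window system for the resize */
    }

    /* the front end calls this once it has dealt with the resize request */
    static void toy_resize_request_completed(struct toy_term *t)
    {
        assert(t->resize_pending == RESIZE_AWAIT_REPLY);
        t->resize_pending = RESIZE_NO;
        toy_process_output(t);   /* the real code re-queues term_out via a callback */
    }

    int main(void)
    {
        struct toy_term t = { RESIZE_NO };
        toy_request_resize(&t);
        toy_process_output(&t);             /* deferred while the reply is awaited */
        toy_resize_request_completed(&t);   /* resumes output processing */
        return 0;
    }

As the commit message says, Windows can make the completion call as soon as it has resized the window, because its resize processing is synchronous; on GTK the timing logic now lives in window.c.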
|
|
|
|
|
2002-10-25 11:30:33 +00:00
|
|
|
/*
|
2018-09-11 15:23:38 +00:00
|
|
|
* Hand a backend to the terminal, so it can be notified of resizes.
|
2002-10-25 11:30:33 +00:00
|
|
|
*/
|
2018-09-11 15:23:38 +00:00
|
|
|
void term_provide_backend(Terminal *term, Backend *backend)
|
2002-10-25 11:30:33 +00:00
|
|
|
{
|
2018-09-11 15:23:38 +00:00
|
|
|
term->backend = backend;
|
Fix assertion failure on Restart Session.
This occurred if the SSH server closed the connection for any
reason (in practice usually a timeout, but reproducible more easily by
manually killing a test server process) while the user was in the
middle of any kind of interactive prompt-based login in the GUI PuTTY
terminal (be it simple password, k-i, private key passphrase,
whatever).
The problem was that term->userpass_state wasn't cleaned up when the
connection died, and then if you started a fresh SSH session in the
same terminal, the attempt to create a new term->userpass_state would
find there was one already there.
The simplest place to insert the missing cleanup is the call to
term_provide_backend(), because that's a terminal API function which
is already called to notify the terminal that one backend has gone
away and the next one has turned up.
(In fact, it's called twice, once to set term->backend to NULL when
the first session closes, and again when the session is restarted. I
see no harm in making the cleanup unconditional, not bothering to tell
the difference between the two cases.)
2024-12-14 11:44:28 +00:00
|
|
|
if (term->userpass_state)
|
|
|
|
term_userpass_state_free(term->userpass_state);
|
2018-09-11 15:23:38 +00:00
|
|
|
if (term->backend && term->cols > 0 && term->rows > 0)
|
|
|
|
backend_size(term->backend, term->cols, term->rows);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
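
As the commit message above notes, this function is called once with NULL when a session closes and again when it is restarted, and the userpass-state cleanup is deliberately unconditional so it covers both. The call pattern across a Restart Session is roughly (a sketch of the sequence, not a literal call site):

    term_provide_backend(term, NULL);     /* session closed: detach the backend and
                                           * free any leftover userpass prompt state */

    term_provide_backend(term, backend);  /* session restarted: attach the new backend
                                           * and re-send the terminal size to it */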
|
|
|
|
|
2003-03-06 12:51:12 +00:00
|
|
|
/* Find the bottom line on the screen that has any content.
|
|
|
|
* If only the top line has content, returns 0.
|
|
|
|
* If no lines have content, returns -1.
|
2019-09-08 19:29:00 +00:00
|
|
|
*/
|
2022-08-03 19:48:46 +00:00
|
|
|
static int find_last_nonempty_line(Terminal *term, tree234 *screen)
|
2003-03-06 12:51:12 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for (i = count234(screen) - 1; i >= 0; i--) {
|
2019-09-08 19:29:00 +00:00
|
|
|
termline *line = index234(screen, i);
|
|
|
|
int j;
|
|
|
|
for (j = 0; j < line->cols; j++)
|
|
|
|
if (!termchars_equal(&line->chars[j], &term->erase_char))
|
|
|
|
break;
|
|
|
|
if (j != line->cols) break;
|
2003-03-06 12:51:12 +00:00
|
|
|
}
|
|
|
|
return i;
|
|
|
|
}
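
Callers turn the returned index into a count of populated lines by adding one, exactly as swap_screen() does for term->alt_sblines below. A typical use (sketch):

    /* number of lines in 'screen' that contain anything; 0 if it is blank */
    int populated = find_last_nonempty_line(term, screen) + 1;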
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
2018-10-29 19:50:29 +00:00
|
|
|
* Swap screens. If `reset' is true and we have been asked to
|
2002-10-22 10:31:23 +00:00
|
|
|
* switch to the alternate screen, we must bring most of its
|
|
|
|
* configuration from the main screen and erase the contents of the
|
|
|
|
* alternate screen completely. (This is even true if we're already
|
|
|
|
* on it! Blame xterm.)
|
1999-01-08 13:02:13 +00:00
|
|
|
*/
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that they're bool and TRUE is the 'true' defined
by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
static void swap_screen(Terminal *term, int which,
|
|
|
|
bool reset, bool keep_cur_pos)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
1999-01-08 13:02:13 +00:00
|
|
|
int t;
|
2018-11-02 19:23:19 +00:00
|
|
|
bool bt;
|
2006-08-15 12:45:21 +00:00
|
|
|
pos tp;
|
2017-10-08 12:45:08 +00:00
|
|
|
truecolour ttc;
|
2001-04-16 21:25:13 +00:00
|
|
|
tree234 *ttr;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2002-10-22 10:31:23 +00:00
|
|
|
if (!which)
|
2019-09-08 19:29:00 +00:00
|
|
|
reset = false; /* do no weird resetting if which==0 */
|
2002-10-22 10:31:23 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (which != term->alt_which) {
|
2019-12-30 23:23:42 +00:00
|
|
|
if (term->erase_to_scrollback && term->alt_screen &&
|
|
|
|
term->alt_which && term->disptop < 0) {
|
|
|
|
/*
|
|
|
|
* We're swapping away from the alternate screen, so some
|
|
|
|
* lines are about to vanish from the virtual scrollback.
|
|
|
|
* Adjust disptop by that much, so that (if we're not
|
|
|
|
* resetting the scrollback anyway on a display event) the
|
|
|
|
* current scroll position still ends up pointing at the
|
|
|
|
* same text.
|
|
|
|
*/
|
|
|
|
term->disptop += term->alt_sblines;
|
|
|
|
if (term->disptop > 0)
|
|
|
|
term->disptop = 0;
|
|
|
|
}
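    /*
     * Worked example with made-up figures: if the alt screen was
     * contributing alt_sblines == 5 lines to the virtual scrollback
     * and the user had scrolled back 3 lines (disptop == -3), the
     * adjustment above gives -3 + 5 == 2, which the clamp turns into
     * 0: the view snaps to the live screen, because the text it was
     * showing has just vanished. If disptop had been -10 it would
     * simply become -5, still pointing at the same text.
     */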
|
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
term->alt_which = which;
|
|
|
|
|
|
|
|
ttr = term->alt_screen;
|
|
|
|
term->alt_screen = term->screen;
|
|
|
|
term->screen = ttr;
|
2020-06-16 17:02:39 +00:00
|
|
|
term->alt_sblines = (
|
|
|
|
term->alt_screen ?
|
|
|
|
find_last_nonempty_line(term, term->alt_screen) + 1 : 0);
|
2019-09-08 19:29:00 +00:00
|
|
|
t = term->curs.x;
|
|
|
|
if (!reset && !keep_cur_pos)
|
|
|
|
term->curs.x = term->alt_x;
|
|
|
|
term->alt_x = t;
|
|
|
|
t = term->curs.y;
|
|
|
|
if (!reset && !keep_cur_pos)
|
|
|
|
term->curs.y = term->alt_y;
|
|
|
|
term->alt_y = t;
|
|
|
|
t = term->marg_t;
|
|
|
|
if (!reset) term->marg_t = term->alt_t;
|
|
|
|
term->alt_t = t;
|
|
|
|
t = term->marg_b;
|
|
|
|
if (!reset) term->marg_b = term->alt_b;
|
|
|
|
term->alt_b = t;
|
|
|
|
bt = term->dec_om;
|
|
|
|
if (!reset) term->dec_om = term->alt_om;
|
|
|
|
term->alt_om = bt;
|
|
|
|
bt = term->wrap;
|
|
|
|
if (!reset) term->wrap = term->alt_wrap;
|
|
|
|
term->alt_wrap = bt;
|
|
|
|
bt = term->wrapnext;
|
|
|
|
if (!reset) term->wrapnext = term->alt_wnext;
|
|
|
|
term->alt_wnext = bt;
|
|
|
|
bt = term->insert;
|
|
|
|
if (!reset) term->insert = term->alt_ins;
|
|
|
|
term->alt_ins = bt;
|
|
|
|
t = term->cset;
|
|
|
|
if (!reset) term->cset = term->alt_cset;
|
|
|
|
term->alt_cset = t;
|
|
|
|
bt = term->utf;
|
|
|
|
if (!reset) term->utf = term->alt_utf;
|
|
|
|
term->alt_utf = bt;
|
|
|
|
t = term->sco_acs;
|
|
|
|
if (!reset) term->sco_acs = term->alt_sco_acs;
|
|
|
|
term->alt_sco_acs = t;
|
|
|
|
|
|
|
|
tp = term->savecurs;
|
Fix cursor save/restore with [?1047 alt-screen sequences.
A long time ago, in commit 09f86ce7e, I introduced a separate copy of
the saved cursor position (used by the ESC 7 / ESC 8 sequences) for
the main and alternate screens. The idea was to fix mishandling of an
input sequence of the form
ESC 7 (save cursor)
ESC [?47h (switch to alternate screen)
...
ESC 7 ESC 8 (save and restore cursor, while in alternate screen)
...
ESC [?47l (switch back from alternate screen)
ESC 8 (restore cursor, expecting it to match the _first_ ESC 7)
in which, before the fix, the second ESC 7 would overwrite the
position saved by the first one. So the final ESC 8 would restore the
cursor position to wherever it happened to have been saved in the
alternate screen, instead of where it was saved before switching _to_
the alternate screen.
I've recently noticed that the same bug still happens if you use the
alternative escape sequences ESC[?1047h and ESC[?1047l to switch to
the alternate screen, instead of ESC[?47h and ESC[?47l. This is
because that version of the escape sequence sets the internal flag
'keep_cur_pos' in the call to swap_screen, whose job is to arrange
that the actual cursor position doesn't change at the instant of the
switch. But the code that swaps the _saved_ cursor position in and out
is also conditioned on keep_cur_pos, so the 1047 variant of the
screen-swap sequence was bypassing that too, and behaving as if there
was just a single saved cursor position inside and outside the
alternate screen.
I don't know why I did it that way in 2006. It could have been
deliberate for some reason, or it could just have been mindless copy
and paste from the existing cursor-related swap code. But checking
with xterm now, it definitely seems to be wrong: the 1047 screen swap
preserves the _actual_ cursor position across the swap, but still has
independent _saved_ cursor positions in the two screens. So now PuTTY
does the same.
2019-12-24 10:52:38 +00:00
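
The behaviour described in the commit message above is easy to check by hand. The small test program below (a rough sketch; it assumes an xterm-compatible terminal) saves the cursor on the main screen, does a save/restore inside the 1047 alternate screen, and then relies on the final ESC 8 returning to the position saved on the main screen:

    #include <stdio.h>

    int main(void)
    {
        printf("\033[2J\033[10;5H");    /* clear screen, move to row 10, column 5 */
        printf("\0337");                /* ESC 7: save cursor on the main screen */
        printf("\033[?1047h");          /* switch to the alternate screen (1047 variant) */
        printf("\033[3;3H\0337\0338");  /* save and restore a position in there */
        printf("\033[?1047l");          /* switch back from the alternate screen */
        printf("\0338");                /* ESC 8: should land back at row 10, column 5 */
        printf("here\n");
        return 0;
    }

With the fix, 'here' appears at the position saved before entering the alternate screen; with the old behaviour it appeared at the position saved inside it.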
|
|
|
if (!reset)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->savecurs = term->alt_savecurs;
|
|
|
|
term->alt_savecurs = tp;
|
2006-08-15 12:45:21 +00:00
|
|
|
t = term->save_cset;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 12:45:21 +00:00
|
|
|
term->save_cset = term->alt_save_cset;
|
|
|
|
term->alt_save_cset = t;
|
|
|
|
t = term->save_csattr;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 12:45:21 +00:00
|
|
|
term->save_csattr = term->alt_save_csattr;
|
|
|
|
term->alt_save_csattr = t;
|
2006-08-15 22:48:01 +00:00
|
|
|
t = term->save_attr;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 22:48:01 +00:00
|
|
|
term->save_attr = term->alt_save_attr;
|
|
|
|
term->alt_save_attr = t;
|
2017-10-08 12:45:08 +00:00
|
|
|
ttc = term->save_truecolour;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2017-10-08 12:45:08 +00:00
|
|
|
term->save_truecolour = term->alt_save_truecolour;
|
|
|
|
term->alt_save_truecolour = ttc;
|
2018-11-02 19:23:19 +00:00
|
|
|
bt = term->save_utf;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 12:45:21 +00:00
|
|
|
term->save_utf = term->alt_save_utf;
|
2018-11-02 19:23:19 +00:00
|
|
|
term->alt_save_utf = bt;
|
|
|
|
bt = term->save_wnext;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 12:45:21 +00:00
|
|
|
term->save_wnext = term->alt_save_wnext;
|
2018-11-02 19:23:19 +00:00
|
|
|
term->alt_save_wnext = bt;
|
2006-08-15 12:45:21 +00:00
|
|
|
t = term->save_sco_acs;
|
2019-12-24 10:52:38 +00:00
|
|
|
if (!reset)
|
2006-08-15 12:45:21 +00:00
|
|
|
term->save_sco_acs = term->alt_save_sco_acs;
|
|
|
|
term->alt_save_sco_acs = t;
|
2019-12-30 23:23:42 +00:00
|
|
|
|
|
|
|
if (term->erase_to_scrollback && term->alt_screen &&
|
|
|
|
term->alt_which && term->disptop < 0) {
|
|
|
|
/*
|
|
|
|
* Inverse of the adjustment at the top of this function.
|
|
|
|
* This time, we're swapping _to_ the alternate screen, so
|
|
|
|
* some lines are about to _appear_ in the virtual
|
|
|
|
* scrollback, and we adjust disptop in the other
|
|
|
|
* direction.
|
|
|
|
*
|
|
|
|
* Both these adjustments depend on the value stored in
|
|
|
|
* term->alt_sblines while the alt screen is selected,
|
|
|
|
* which is why we had to do one _before_ switching away
|
|
|
|
* from it and the other _after_ switching to it.
|
|
|
|
*/
|
|
|
|
term->disptop -= term->alt_sblines;
|
|
|
|
int limit = -sblines(term);
|
|
|
|
if (term->disptop < limit)
|
|
|
|
term->disptop = limit;
|
|
|
|
}
|
2002-10-22 10:31:23 +00:00
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (reset && term->screen) {
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* Yes, this _is_ supposed to honour background-colour-erase.
|
|
|
|
*/
|
|
|
|
erase_lots(term, false, true, true);
|
2002-10-22 10:31:23 +00:00
|
|
|
}
|
Further reorganisations of seen_disp_event().
Shortly after the previous commit I spotted another definitely missing
display update: if you send the byte 0x7F, aka 'destructive
backspace', then the display didn't update immediately.
That was two in a row, so I did an eyeball review of the whole
terminal state machine to the best of my ability. Found a couple more
borderline ones, but also, found that the entire VT52 sub-state-
machine had a blanket seen_disp_event which really _shouldn't_ have
been there, because half the VT52 sequences aren't actually display-
modifying updates.
To make this _slightly_ less error-prone, I've sunk a number of
seen_disp_event calls into subroutines that aren't the top-level
term_out(). For example, erase_lots(), scroll(), move() and
swap_screen() now all call seen_disp_event within themselves, so
their call sites don't all have to remember to.
There are probably further bugs after this upheaval, but I think it's
moving in generally the right direction.
2023-09-25 19:43:55 +00:00
|
|
|
|
|
|
|
seen_disp_event(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the scroll bar.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void update_sbar(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2003-03-06 12:51:12 +00:00
|
|
|
int nscroll = sblines(term);
|
Remove the 'Frontend' type and replace it with a vtable.
After the recent Seat and LogContext revamps, _nearly_ all the
remaining uses of the type 'Frontend' were in terminal.c, which needs
all sorts of interactions with the GUI window the terminal lives in,
from the obvious (actually drawing text on the window, reading and
writing the clipboard) to the obscure (minimising, maximising and
moving the window in response to particular escape sequences).
All of those functions are now provided by an abstraction called
TermWin. The few remaining uses of Frontend after _that_ are internal
to a particular platform directory, so as to spread the implementation
of that particular kind of Frontend between multiple source files; so
I've renamed all of those so that they take a more specifically named
type that refers to the particular implementation rather than the
general abstraction.
So now the name 'Frontend' no longer exists in the code base at all,
and everywhere one used to be used, it's completely clear whether it
was operating in one of Frontend's three abstract roles (and if so,
which), or whether it was specific to a particular implementation.
Another type that's disappeared is 'Context', which used to be a
typedef defined to something different on each platform, describing
whatever short-lived resources were necessary to draw on the terminal
window: the front end would provide a ready-made one when calling
term_paint, and the terminal could request one with get_ctx/free_ctx
if it wanted to do proactive window updates. Now that drawing context
lives inside the TermWin itself, because there was never any need to
have two of those contexts live at the same time.
(Another minor API change is that the window-title functions - both
reading and writing - have had a missing 'const' added to their char *
parameters / return values.)
I don't expect this change to enable any particularly interesting new
functionality (in particular, I have no plans that need more than one
implementation of TermWin in the same application). But it completes
the tidying-up that began with the Seat and LogContext rework.
2018-10-25 17:44:04 +00:00
|
|
|
win_set_scrollbar(term->win, nscroll + term->rows,
|
|
|
|
nscroll + term->disptop, term->rows);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
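
To make the arithmetic concrete (made-up numbers, reading the three size arguments as total length, start of the visible region and page size): with 200 lines of scrollback (sblines(term) == 200), 24 visible rows and the view scrolled back 50 lines (term->disptop == -50), this requests a scrollbar 224 lines long with a 24-line page starting at 150; at the live screen (disptop == 0) the page would start at 200, right at the bottom.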
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the region bounded by the two pointers intersects
|
|
|
|
* the scroll region, and de-select the on-screen selection if so.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void check_selection(Terminal *term, pos from, pos to)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2002-10-22 16:11:33 +00:00
|
|
|
if (poslt(from, term->selend) && poslt(term->selstart, to))
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
2019-02-26 18:36:20 +00:00
|
|
|
static void clear_line(Terminal *term, termline *line)
|
|
|
|
{
|
2019-03-10 18:31:46 +00:00
|
|
|
resizeline(term, line, term->cols);
|
2019-02-26 18:36:20 +00:00
|
|
|
for (int i = 0; i < term->cols; i++)
|
|
|
|
copy_termchar(line, i, &term->erase_char);
|
|
|
|
line->lattr = LATTR_NORM;
|
|
|
|
}
|
|
|
|
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
|
|
|
static void check_trust_status(Terminal *term, termline *line)
|
|
|
|
{
|
|
|
|
if (line->trusted != term->trusted) {
|
|
|
|
/*
|
|
|
|
* If we're displaying trusted output on a previously
|
|
|
|
* untrusted line, or vice versa, we need to switch the
|
|
|
|
* 'trusted' attribute on this terminal line, and also clear
|
|
|
|
* all its previous contents.
|
|
|
|
*/
|
|
|
|
clear_line(term, line);
|
|
|
|
line->trusted = term->trusted;
|
|
|
|
}
|
|
|
|
}
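
As the commit message above says, every write path is expected to call this just before touching a termline, so trusted and untrusted output can never be mixed on one line. A hedged sketch of such a call site ('cline' and the way it was obtained are assumptions for illustration, not the real code):

    /* in a write path, with 'cline' the termline about to be written to: */
    check_trust_status(term, cline);   /* wipes the line if the trust level changed */
    /* ...then store the new character cells into 'cline'... */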
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Scroll the screen. (`lines' is +ve for scrolling forward, -ve
|
2018-10-29 19:50:29 +00:00
|
|
|
* for backward.) `sb' is true if the scrolling is permitted to
|
1999-01-08 13:02:13 +00:00
|
|
|
* affect the scrollback buffer.
|
|
|
|
*/
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; now that they're bool and assigned the 'true' defined by
stdbool.h, that idiom provokes a warning from gcc: 'suggest
parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
static void scroll(Terminal *term, int topline, int botline,
|
|
|
|
int lines, bool sb)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
|
|
|
termline *line;
|
2019-02-26 18:36:20 +00:00
|
|
|
int seltop, scrollwinsize;
|
2000-07-26 12:13:51 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (topline != 0 || term->alt_which != 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
sb = false;
|
2001-04-16 21:25:13 +00:00
|
|
|
|
2012-09-23 15:36:54 +00:00
|
|
|
scrollwinsize = botline - topline + 1;
|
|
|
|
|
2001-04-16 21:25:13 +00:00
|
|
|
if (lines < 0) {
|
2012-09-23 15:36:54 +00:00
|
|
|
lines = -lines;
|
|
|
|
if (lines > scrollwinsize)
|
|
|
|
lines = scrollwinsize;
|
2019-09-08 19:29:00 +00:00
|
|
|
while (lines-- > 0) {
|
|
|
|
line = delpos234(term->screen, botline);
|
2004-10-13 11:50:16 +00:00
|
|
|
resizeline(term, line, term->cols);
|
2019-02-26 18:36:20 +00:00
|
|
|
clear_line(term, line);
|
2019-09-08 19:29:00 +00:00
|
|
|
addpos234(term->screen, line, topline);
|
|
|
|
|
|
|
|
if (term->selstart.y >= topline && term->selstart.y <= botline) {
|
|
|
|
term->selstart.y++;
|
|
|
|
if (term->selstart.y > botline) {
|
|
|
|
term->selstart.y = botline + 1;
|
|
|
|
term->selstart.x = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (term->selend.y >= topline && term->selend.y <= botline) {
|
|
|
|
term->selend.y++;
|
|
|
|
if (term->selend.y > botline) {
|
|
|
|
term->selend.y = botline + 1;
|
|
|
|
term->selend.x = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
} else {
|
2012-09-23 15:36:54 +00:00
|
|
|
if (lines > scrollwinsize)
|
|
|
|
lines = scrollwinsize;
|
2019-09-08 19:29:00 +00:00
|
|
|
while (lines-- > 0) {
|
|
|
|
line = delpos234(term->screen, topline);
|
2005-04-01 13:25:13 +00:00
|
|
|
#ifdef TERM_CC_DIAGS
|
2019-09-08 19:29:00 +00:00
|
|
|
cc_check(line);
|
2005-04-01 13:25:13 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
if (sb && term->savelines > 0) {
|
|
|
|
int sblen = count234(term->scrollback);
|
|
|
|
/*
|
|
|
|
* We must add this line to the scrollback. We'll
|
|
|
|
* remove a line from the top of the scrollback if
|
|
|
|
* the scrollback is full.
|
|
|
|
*/
|
|
|
|
if (sblen == term->savelines) {
|
Build option to disable scrollback compression.
This was requested by a downstream of the code, who wanted to change
the time/space tradeoff in the terminal. I currently have no plans to
change this setting for upstream PuTTY, although there is a cmake
option for it just to make testing it easy.
To avoid sprinkling ifdefs over the whole terminal code, the strategy
is to keep the separate type 'compressed_scrollback_line', and turn it
into a typedef for a 'termline *'. So compressline() becomes almost
trivial, and decompressline() even more so.
Memory management is the fiddly part. To make this work sensibly on
both sides, I've broken up each of compressline() and decompressline()
into two versions, one of which takes ownership of (and logically
speaking frees) its input, and the other doesn't. So at call sites
where a function was followed by a free, it's now calling the
'and_free' version of the function, and where the input object was
reused afterwards, it's calling the 'no_free' version. This means that
in different branches of the #if, I can make one function call the
other or vice versa, and no call site is stuck with having to do
things in a more roundabout way than necessary.
The freeing of the _return_ value from decompressline() is handled for
us, because termlines already have a 'temporary' flag which is set
when they're returned from the decompressor, and anyone receiving a
termline from lineptr() calls unlineptr() when they're finished with
it, which will _conditionally_ free it, depending on that 'temporary'
flag. So in the new mode, 'temporary' is never set at all, and all
those unlineptr() calls do nothing.
However, we also still need to free compressed lines properly when
they're actually being thrown away (scrolled off the top of the
scrollback, or cleaned up in term_free), and for that, I've made a new
special-purpose free_compressed_line() function.
2022-11-20 10:55:33 +00:00
|
|
|
compressed_scrollback_line *cline;
|
2019-09-08 19:29:00 +00:00
|
|
|
|
|
|
|
sblen--;
|
|
|
|
cline = delpos234(term->scrollback, 0);
|
2022-11-20 10:55:33 +00:00
|
|
|
free_compressed_line(cline);
|
2019-09-08 19:29:00 +00:00
|
|
|
} else
|
|
|
|
term->tempsblines += 1;
|
|
|
|
|
2022-11-20 10:55:33 +00:00
|
|
|
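                /*
                 * compressline_no_free() leaves `line' alive (see the
                 * commit message above), so only a compressed copy goes
                 * into the scrollback tree and the same termline can be
                 * reused below as the new bottom line of the scroll
                 * region.
                 */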
addpos234(term->scrollback, compressline_no_free(line), sblen);
|
2019-09-08 19:29:00 +00:00
|
|
|
|
|
|
|
/* now `line' itself can be reused as the bottom line */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the user is currently looking at part of the
|
|
|
|
* scrollback, and they haven't enabled any options
|
|
|
|
* that are going to reset the scrollback as a
|
|
|
|
* result of this movement, then the chances are
|
|
|
|
* they'd like to keep looking at the same line. So
|
|
|
|
* we move their viewpoint at the same rate as the
|
|
|
|
* scroll, at least until their viewpoint hits the
|
|
|
|
* top end of the scrollback buffer, at which point
|
|
|
|
* we don't have the choice any more.
|
|
|
|
*
|
|
|
|
* Thanks to Jan Holmen Holsten for the idea and
|
|
|
|
* initial implementation.
|
|
|
|
*/
|
|
|
|
if (term->disptop > -term->savelines && term->disptop < 0)
|
|
|
|
term->disptop--;
|
Rationalise the code that resets terminal scrollback.
Recently I encountered a CLI tool that took tens of seconds to run,
and produced no _visible_ output, but wrote ESC[0m to the terminal a
few times during its operation. (Probably by mistake. In other modes
it does print colourful messages, so I expect a 'reset colour' call
was accidentally outside the 'if' statement containing the rest of the
diagnostic it followed. Or something along those lines.)
I noticed this because every ESC[0m reset my pterm scrollback to the
bottom, which wasn't very helpful, and was unintentional on pterm's
part (as _well_ as on the part of the tool). But I can fix pterm!
At first glance the code _looked_ sensible: terminal.c contains calls
to seen_disp_event(term) whenever terminal output does something that
requires a redraw of the terminal window. Those are also the updates
that should count as 'reset scrollback on display activity'. And
ESC[0m, along with the rest of the SGR handler, correctly contained no
such call. So how did a display update happen at all?
The code was confusingly tangled up with the code that responds to
terminal activity by resetting the phase of the blinking cursor (if
any). term_reset_cblink() was calling seen_disp_event() (when surely
it should be the other way round!), and also, term_reset_cblink() was
called whenever _any_ terminal output data arrived. That combination
meant that any byte output to the terminal at all turned out to count
as display activity, whether or not it changed the screen contents.
Additionally, the other scrollback-reset flag, 'reset scrollback on
keypress', was handled by calling seen_disp_event() from the keyboard
handler. But display events and keyboard events are supposed to be
_independent_ potential causes of scrollback resets - it doesn't make
any sense to handle one by treating it as the other!
So I've reorganised the code completely:
- the seen_disp_event *flag* is now gone. Instead, the
seen_disp_event function tests the scroll_on_disp flag, and if set,
resets the scroll position immediately and sets the general
'scrollbar needs updating' flag.
- keyboard input is handled by doing exactly the same thing except
testing the scroll_on_key flag, so the two systems are properly
independent. That code calls term_schedule_update so that the
terminal will be redrawn as a result of the scroll, but doesn't
also call seen_disp_event() for the rest of the full treatment.
- the term_update code that does the scrollbar update is much
simpler, since now it only needs to test that one flag.
- I also had to set that flag explicitly in scroll() so that the
scrollbar would still be updated as a result of the scrollback size
changing. I think that must have been happening entirely by
accident before.
- term_reset_cblink is subsumed into seen_disp_event, so that only
_substantive_ display updates cause the cursor blink phase to reset
to the start of the solid period.
Result: if programs output no-op sequences like ESC[0m, or if you
press keys that don't echo, then the cursor will carry on blinking
normally, and (if you don't also have scroll_on_key set) the
scrollback won't be reset. And the code is slightly shorter than it
was before, and hopefully more sensible too.
(However, other classes of no-op activity _will_ still cause a cursor
blink phase change and a scrollback reset, such as sending a
cursor-positioning sequence that puts the cursor in the same place it
was already - even something as simple as ^M when already at the start
of the line. It might be nice to fix that, but it's much more
difficult: you'd have to either put a complicated and error-prone test
at every seen_disp_event call site, or else expensively diff the
entire visible terminal state against how it was before. And to avoid
a nondeterministic dependency on the terminal update cooldown, that
diff would have to be done at the granularity of individual control
sequences rather than a bounded number of times a second. I'd rather
not!)
2023-09-03 08:29:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We've just modified the data that the terminal's
|
|
|
|
* scrollbar is based on, so remember to update it.
|
|
|
|
*/
|
|
|
|
term->win_scrollbar_update_pending = true;
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2004-10-13 11:50:16 +00:00
|
|
|
resizeline(term, line, term->cols);
|
2019-02-26 18:36:20 +00:00
|
|
|
clear_line(term, line);
|
Fix trust-sigil handling when scrolling the terminal.
Previously, when we scrolled the terminal, the newly exposed line at
the bottom would be immediately allocated a trust status corresponding
to the current state of the terminal. So if you're in trusted mode and
you print a newline, then the line scrolled on at the bottom
immediately gets a trust sigil, whether you subsequently print
anything on it or not.
Up until now, that hasn't mattered, because we always _do_ print
something on it. But if you don't - if you send \r\n\r\n to
deliberately leave a blank line - then it turns out that's not what we
want after all, because if the screen _doesn't_ scroll, the
passed-over line remains completely blank, whereas if it does scroll
the blank line gets a trust sigil, which is inconsistent.
Now, terminal lines newly exposed by a scroll have untrusted status,
just the same as terminal lines that were present in the initial blank
screen. They only become trusted if you actually print at least one
character on them (whereupon check_trust_status will re-clear them
just in case). And this is now independent of whether the terminal has
scrolled or not.
2021-10-30 14:32:34 +00:00
|
|
|
line->trusted = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
addpos234(term->screen, line, botline);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the selection endpoints move into the scrollback,
|
|
|
|
* we keep them moving until they hit the top. However,
|
|
|
|
* of course, if the line _hasn't_ moved into the
|
|
|
|
* scrollback then we don't do this, and cut them off
|
|
|
|
* at the top of the scroll region.
|
|
|
|
*
|
|
|
|
* This applies to selstart and selend (for an existing
|
|
|
|
* selection), and also selanchor (for one being
|
|
|
|
* selected as we speak).
|
|
|
|
*/
|
|
|
|
seltop = sb ? -term->savelines : topline;
|
|
|
|
|
|
|
|
if (term->selstate != NO_SELECTION) {
|
|
|
|
if (term->selstart.y >= seltop &&
|
|
|
|
term->selstart.y <= botline) {
|
|
|
|
term->selstart.y--;
|
|
|
|
if (term->selstart.y < seltop) {
|
|
|
|
term->selstart.y = seltop;
|
|
|
|
term->selstart.x = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (term->selend.y >= seltop && term->selend.y <= botline) {
|
|
|
|
term->selend.y--;
|
|
|
|
if (term->selend.y < seltop) {
|
|
|
|
term->selend.y = seltop;
|
|
|
|
term->selend.x = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (term->selanchor.y >= seltop &&
|
|
|
|
term->selanchor.y <= botline) {
|
|
|
|
term->selanchor.y--;
|
|
|
|
if (term->selanchor.y < seltop) {
|
|
|
|
term->selanchor.y = seltop;
|
|
|
|
term->selanchor.x = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
Further reorganisations of seen_disp_event().
Shortly after the previous commit I spotted another definitely missing
display update: if you send the byte 0x7F, aka 'destructive
backspace', then the display didn't update immediately.
That was two in a row, so I did an eyeball review of the whole
terminal state machine to the best of my ability. Found a couple more
borderline ones, but also, found that the entire VT52 sub-state-
machine had a blanket seen_disp_event which really _shouldn't_ have
been there, because half the VT52 sequences aren't actually display-
modifying updates.
To make this _slightly_ less error-prone, I've sunk a number of
seen_disp_event() calls into subroutines that aren't the top-level
term_out(). For example, erase_lots(), scroll(), move() and
swap_screen() now all call seen_disp_event() within themselves, so
their call sites don't all have to remember to.
There are probably further bugs after this upheaval, but I think it's
moving in generally the right direction.
2023-09-25 19:43:55 +00:00
|
|
|
|
|
|
|
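    /*
     * As the commit messages above describe, seen_disp_event() is now
     * the one place where 'scroll on display activity' is applied: if
     * that option is set, it resets the scrollback view to the bottom
     * and flags the scrollbar for update, as well as scheduling a
     * window redraw.
     */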
seen_disp_event(term);
|
2003-02-01 12:26:33 +00:00
|
|
|
}
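/*
 * A hedged sketch of a typical caller (not a quotation of the real
 * dispatch code in term_out() further down): a line feed with the
 * cursor on the bottom scroll margin scrolls the margin region up by
 * one line and lets it feed the scrollback, while elsewhere it just
 * moves the cursor down.
 */
#if 0 /* illustrative sketch only */
static void linefeed_sketch(Terminal *term)
{
    if (term->curs.y == term->marg_b)
        scroll(term, term->marg_t, term->marg_b, +1, true); /* scroll region up */
    else if (term->curs.y < term->rows - 1)
        term->curs.y++;                                     /* just move down */
    term->wrapnext = false;
}
#endif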
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Move the cursor to a given position, clipping at boundaries. We
|
|
|
|
* may or may not want to clip at the scroll margin: marg_clip is 0
|
|
|
|
* not to, 1 to disallow _passing_ the margins, and 2 to disallow
|
|
|
|
* even _being_ outside the margins.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void move(Terminal *term, int x, int y, int marg_clip)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
1999-01-08 13:02:13 +00:00
|
|
|
if (x < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
x = 0;
|
2002-10-22 16:11:33 +00:00
|
|
|
if (x >= term->cols)
|
2019-09-08 19:29:00 +00:00
|
|
|
x = term->cols - 1;
|
1999-01-08 13:02:13 +00:00
|
|
|
if (marg_clip) {
|
2019-09-08 19:29:00 +00:00
|
|
|
if ((term->curs.y >= term->marg_t || marg_clip == 2) &&
|
|
|
|
y < term->marg_t)
|
|
|
|
y = term->marg_t;
|
|
|
|
if ((term->curs.y <= term->marg_b || marg_clip == 2) &&
|
|
|
|
y > term->marg_b)
|
|
|
|
y = term->marg_b;
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
if (y < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
y = 0;
|
2002-10-22 16:11:33 +00:00
|
|
|
if (y >= term->rows)
|
2019-09-08 19:29:00 +00:00
|
|
|
y = term->rows - 1;
|
2002-10-22 16:11:33 +00:00
|
|
|
term->curs.x = x;
|
|
|
|
term->curs.y = y;
|
2018-10-29 19:50:29 +00:00
|
|
|
term->wrapnext = false;
|
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
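/*
 * Typical values of marg_clip (illustrative only; term->dec_om, the
 * DEC origin-mode flag, is an assumption about the field name used by
 * the dispatch code further down):
 *
 *   move(term, term->curs.x, term->curs.y - 1, 1);
 *       cursor up: may not cross the top margin, but is left alone if
 *       it is already above it;
 *
 *   move(term, 0, 0, term->dec_om ? 2 : 0);
 *       absolute addressing: in origin mode the target is clamped to
 *       lie inside the margins, otherwise not clipped at all.
 */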
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Save or restore the cursor and SGR mode.
|
|
|
|
*/
|
2018-11-02 19:23:19 +00:00
|
|
|
static void save_cursor(Terminal *term, bool save)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
1999-01-08 13:02:13 +00:00
|
|
|
if (save) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->savecurs = term->curs;
|
|
|
|
term->save_attr = term->curr_attr;
|
|
|
|
term->save_truecolour = term->curr_truecolour;
|
|
|
|
term->save_cset = term->cset;
|
|
|
|
term->save_utf = term->utf;
|
|
|
|
term->save_wnext = term->wrapnext;
|
|
|
|
term->save_csattr = term->cset_attr[term->cset];
|
|
|
|
term->save_sco_acs = term->sco_acs;
|
1999-01-08 13:02:13 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curs = term->savecurs;
|
|
|
|
/* Make sure the window hasn't shrunk since the save */
|
|
|
|
if (term->curs.x >= term->cols)
|
|
|
|
term->curs.x = term->cols - 1;
|
|
|
|
if (term->curs.y >= term->rows)
|
|
|
|
term->curs.y = term->rows - 1;
|
|
|
|
|
|
|
|
term->curr_attr = term->save_attr;
|
|
|
|
term->curr_truecolour = term->save_truecolour;
|
|
|
|
term->cset = term->save_cset;
|
|
|
|
term->utf = term->save_utf;
|
|
|
|
term->wrapnext = term->save_wnext;
|
|
|
|
/*
|
|
|
|
* wrapnext might reset to False if the x position is no
|
|
|
|
* longer at the rightmost edge.
|
|
|
|
*/
|
|
|
|
if (term->wrapnext && term->curs.x < term->cols-1)
|
|
|
|
term->wrapnext = false;
|
|
|
|
term->cset_attr[term->cset] = term->save_csattr;
|
|
|
|
term->sco_acs = term->save_sco_acs;
|
|
|
|
set_erase_char(term);
|
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
}
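/*
 * Typically driven by the DECSC/DECRC pair: ESC 7 calls
 * save_cursor(term, true) and ESC 8 calls save_cursor(term, false).
 * (Illustrative pairing; the actual dispatch lives in term_out()
 * further on.)
 */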
|
|
|
|
|
2003-01-02 16:20:29 +00:00
|
|
|
/*
|
|
|
|
* This function is called before doing _anything_ which affects
|
|
|
|
* only part of a line of text. It is used to mark the boundary
|
|
|
|
* between two character positions, and it indicates that some sort
|
|
|
|
* of effect is going to happen on only one side of that boundary.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2003-01-02 16:20:29 +00:00
|
|
|
* The effect of this function is to check whether a CJK
|
|
|
|
* double-width character is straddling the boundary, and to remove
|
|
|
|
* it and replace it with two spaces if so. (Of course, one or
|
|
|
|
* other of those spaces is then likely to be replaced with
|
|
|
|
* something else again, as a result of whatever happens next.)
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2003-01-02 16:20:29 +00:00
|
|
|
* Also, if the boundary is at the right-hand _edge_ of the screen,
|
|
|
|
* it implies something deliberate is being done to the rightmost
|
|
|
|
* column position; hence we must clear LATTR_WRAPPED2.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2003-01-02 16:20:29 +00:00
|
|
|
* The input to the function is the coordinates of the _second_
|
|
|
|
* character of the pair.
|
|
|
|
*/
|
|
|
|
static void check_boundary(Terminal *term, int x, int y)
|
|
|
|
{
|
2004-10-13 11:50:16 +00:00
|
|
|
termline *ldata;
|
2003-01-02 16:20:29 +00:00
|
|
|
|
|
|
|
/* Validate input coordinates, just in case. */
|
2015-10-13 19:33:12 +00:00
|
|
|
if (x <= 0 || x > term->cols)
|
2019-09-08 19:29:00 +00:00
|
|
|
return;
|
2003-01-02 16:20:29 +00:00
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
ldata = scrlineptr(y);
|
2019-03-10 14:39:28 +00:00
|
|
|
check_trust_status(term, ldata);
|
2014-07-23 21:48:02 +00:00
|
|
|
check_line_size(term, ldata);
|
2003-01-02 16:20:29 +00:00
|
|
|
if (x == term->cols) {
|
2019-09-08 19:29:00 +00:00
|
|
|
ldata->lattr &= ~LATTR_WRAPPED2;
|
2003-01-02 16:20:29 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
if (ldata->chars[x].chr == UCSWIDE) {
|
|
|
|
clear_cc(ldata, x-1);
|
|
|
|
clear_cc(ldata, x);
|
|
|
|
ldata->chars[x-1].chr = ' ' | CSET_ASCII;
|
|
|
|
ldata->chars[x] = ldata->chars[x-1];
|
|
|
|
}
|
2003-01-02 16:20:29 +00:00
|
|
|
}
|
|
|
|
}
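/*
 * Sketch of the intended call pattern (assumed, not quoted from the
 * character-output path below): before writing a double-width glyph at
 * the cursor, make sure neither edge of the two-cell write bisects an
 * existing CJK pair.
 */
#if 0 /* illustrative sketch only */
static void before_wide_char_sketch(Terminal *term)
{
    check_boundary(term, term->curs.x, term->curs.y);       /* left edge */
    check_boundary(term, term->curs.x + 2, term->curs.y);   /* right edge */
}
#endif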
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Erase a large portion of the screen: the whole screen, or the
|
|
|
|
* whole line, or parts thereof.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void erase_lots(Terminal *term,
|
2019-09-08 19:29:00 +00:00
|
|
|
bool line_only, bool from_begin, bool to_end)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2001-04-17 08:24:29 +00:00
|
|
|
pos start, end;
|
2018-11-02 19:23:19 +00:00
|
|
|
bool erase_lattr;
|
|
|
|
bool erasing_lines_from_top = false;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
|
|
|
if (line_only) {
|
2019-09-08 19:29:00 +00:00
|
|
|
start.y = term->curs.y;
|
|
|
|
start.x = 0;
|
|
|
|
end.y = term->curs.y + 1;
|
|
|
|
end.x = 0;
|
|
|
|
erase_lattr = false;
|
1999-01-08 13:02:13 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
start.y = 0;
|
|
|
|
start.x = 0;
|
|
|
|
end.y = term->rows;
|
|
|
|
end.x = 0;
|
|
|
|
erase_lattr = true;
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
Fix double display glitch in erase_lots().
If the cursor is on the rightmost column of the terminal and
term->wrapnext is set, and the user asks to erase from the current
position to the end of (at least) the line, what should happen?
PuTTY's previous behaviour was to ignore term->wrapnext, and do the
same thing we would have done without it: erase from the current
physical cursor position to EOL inclusive, i.e. blank the character
cell we just printed.
But this is unfortunate if a program writes an interleaving of
printing characters and ESC[K, which I recently found out is what gcc
does in its colour-highlighted error messages: if the last printed
char just before an ESC[K pushes the cursor into the deferred-wrap
state, then the ESC[K blanks that character, and then we wrap to the
next line. So one character of the error message ends up missing.
xfce4-terminal and gnome-terminal take the approach in this situation
of regarding the cursor position as _right_ at the end of the line, so
no character cells get cleared at all, and the error message displays
as intended. I think that's more sensible, so I've switched to doing
the same thing.
(xterm has different behaviour again: it blanks the character cell and
also clears its analogue of the wrapnext flag. So in _their_ handling
of this sequence of output, one character of the error message is
still missing, but it looks as if it's been _omitted_ rather than
replaced by a space.)
Secondly, in the course of fixing that, I looked at the check_boundary
call in erase_lots, which is supposed to ensure that if a wide CJK
character straddles the boundary between what's being erased and what
isn't, then both halves of the character are deleted. I had to modify
that anyway because I was moving that very boundary, and in doing so,
I noticed that even according to the previous behaviour, it had an
off-by-one error. In the case where you send ESC[1K (meaning erase up
to and including the cursor position), the call to check_boundary was
performed on the _left_ edge of the cursor's character cell, when it
should have been the right edge. So you could end up with an
erase_char in the left half (i.e. a space) and still have the magic
value UCSWIDE in the right half, causing the terminal to think you had
a double-width U+0020 on the screen, which isn't supposed to be able
to happen.
2019-08-08 17:05:16 +00:00
|
|
|
|
|
|
|
/* This is the endpoint of the clearing operation that is not
|
|
|
|
* either the start or end of the line / screen. */
|
|
|
|
pos boundary = term->curs;
|
|
|
|
|
2001-04-16 21:25:13 +00:00
|
|
|
if (!from_begin) {
|
2019-08-08 17:05:16 +00:00
|
|
|
/*
|
|
|
|
* If we're erasing from the current char to the end of
|
|
|
|
* line/screen, then we take account of wrapnext, so as to
|
|
|
|
* maintain the invariant that writing a printing character
|
|
|
|
* followed by ESC[K should not overwrite the character you
|
|
|
|
* _just wrote_. That is, when wrapnext says the cursor is
|
|
|
|
* 'logically' at the very rightmost edge of the screen
|
|
|
|
* instead of just before the last printing char, ESC[K should
|
|
|
|
* do nothing at all, and ESC[J should clear the next line but
|
|
|
|
* leave this one unchanged.
|
|
|
|
*
|
|
|
|
* This adjusted position will also be the position we use for
|
|
|
|
* check_boundary (i.e. the thing we ensure isn't in the
|
|
|
|
* middle of a double-width printing char).
|
|
|
|
*/
|
|
|
|
if (term->wrapnext)
|
|
|
|
incpos(boundary);
|
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
start = boundary;
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
2001-04-16 21:25:13 +00:00
|
|
|
if (!to_end) {
|
2019-08-08 17:05:16 +00:00
|
|
|
/*
|
|
|
|
* If we're erasing from the start of (at least) the line _to_
|
|
|
|
* the current position, then that is taken to mean 'inclusive
|
|
|
|
* of the cell under the cursor', which means we don't
|
|
|
|
* consider wrapnext at all: whether it's set or not, we still
|
|
|
|
* clear the cell under the cursor.
|
|
|
|
*
|
|
|
|
* Again, that incremented boundary position is where we
|
|
|
|
* should be careful of a straddling wide character.
|
|
|
|
*/
|
|
|
|
incpos(boundary);
|
2019-09-08 19:29:00 +00:00
|
|
|
end = boundary;
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
2003-01-02 16:20:29 +00:00
|
|
|
if (!from_begin || !to_end)
|
2019-09-08 19:29:00 +00:00
|
|
|
check_boundary(term, boundary.x, boundary.y);
|
2002-10-22 16:11:33 +00:00
|
|
|
check_selection(term, start, end);
|
2000-03-17 10:39:05 +00:00
|
|
|
|
|
|
|
/* Clear screen also forces a full window redraw, just in case. */
|
2002-10-22 16:11:33 +00:00
|
|
|
if (start.y == 0 && start.x == 0 && end.y == term->rows)
|
2019-09-08 19:29:00 +00:00
|
|
|
term_invalidate(term);
|
2000-03-17 10:39:05 +00:00
|
|
|
|
2003-03-07 18:18:38 +00:00
|
|
|
/* Lines scrolled away shouldn't be brought back on if the terminal
|
|
|
|
* resizes. */
|
|
|
|
if (start.y == 0 && start.x == 0 && end.x == 0 && erase_lattr)
|
2019-09-08 19:29:00 +00:00
|
|
|
erasing_lines_from_top = true;
|
2003-03-07 18:18:38 +00:00
|
|
|
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
|
|
|
if (term->erase_to_scrollback && erasing_lines_from_top) {
|
2019-09-08 19:29:00 +00:00
|
|
|
/* If it's a whole number of lines, starting at the top, and
|
|
|
|
* we're fully erasing them, erase by scrolling and keep the
|
|
|
|
* lines in the scrollback. */
|
|
|
|
int scrolllines = end.y;
|
|
|
|
if (end.y == term->rows) {
|
|
|
|
/* Shrink until we find a non-empty row. */
|
|
|
|
scrolllines = find_last_nonempty_line(term, term->screen) + 1;
|
|
|
|
}
|
|
|
|
if (scrolllines > 0)
|
|
|
|
scroll(term, 0, scrolllines - 1, scrolllines, true);
|
2003-03-06 12:51:12 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
termline *ldata = scrlineptr(start.y);
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
|
|
|
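        /* Per the commit message above, check_trust_status() wipes this
         * line completely if its recorded trusted/untrusted state differs
         * from the terminal's current trust status, so server output
         * can't be spliced into a line of PuTTY-generated trusted text. */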
check_trust_status(term, ldata);
|
2019-09-08 19:29:00 +00:00
|
|
|
while (poslt(start, end)) {
|
2014-07-23 21:48:02 +00:00
|
|
|
check_line_size(term, ldata);
|
2019-09-08 19:29:00 +00:00
|
|
|
if (start.x == term->cols) {
|
|
|
|
if (!erase_lattr)
|
|
|
|
ldata->lattr &= ~(LATTR_WRAPPED | LATTR_WRAPPED2);
|
|
|
|
else
|
|
|
|
ldata->lattr = LATTR_NORM;
|
|
|
|
} else {
|
|
|
|
copy_termchar(ldata, start.x, &term->erase_char);
|
|
|
|
}
|
|
|
|
if (incpos(start) && start.y < term->rows) {
|
|
|
|
ldata = scrlineptr(start.y);
|
2019-03-10 14:39:28 +00:00
|
|
|
check_trust_status(term, ldata);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
}
|
2001-04-16 21:25:13 +00:00
|
|
|
}
|
2003-03-07 18:18:38 +00:00
|
|
|
|
|
|
|
/* After an erase of lines from the top of the screen, we shouldn't
|
|
|
|
* bring the lines back again if the terminal enlarges (since the user or
|
2017-04-15 08:06:22 +00:00
|
|
|
* application has explicitly thrown them away). */
|
2003-03-07 18:18:38 +00:00
|
|
|
if (erasing_lines_from_top && !(term->alt_which))
|
2019-09-08 19:29:00 +00:00
|
|
|
term->tempsblines = 0;
|
Further reorganisations of seen_disp_event().
Shortly after the previous commit I spotted another definitely missing
display update: if you send the byte 0x7F, aka 'destructive
backspace', then the display didn't update immediately.
That was two in a row, so I did an eyeball review of the whole
terminal state machine to the best of my ability. Found a couple more
borderline ones, but also, found that the entire VT52 sub-state-
machine had a blanket seen_disp_event which really _shouldn't_ have
been there, because half the VT52 sequences aren't actually display-
modifying updates.
To make this _slightly_ less error-prone, I've sunk a number of
seen_disp_event() calls into subroutines that aren't the top-level
term_out(). For example, erase_lots(), scroll(), move() and
swap_screen() now all call seen_disp_event() within themselves, so
their call sites don't all have to remember to.
There are probably further bugs after this upheaval, but I think it's
moving in generally the right direction.
2023-09-25 19:43:55 +00:00
|
|
|
|
|
|
|
seen_disp_event(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
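A quick way to see the erase-to-scrollback branch above in action is a full-screen erase from the host side; this stand-alone demo (not part of terminal.c) just emits the standard clear-screen pair, after which the previous screen contents should be reachable in the scrollback rather than lost, provided erase-to-scrollback is enabled.
#include <stdio.h>

int main(void)
{
    printf("\033[H");    /* CUP: home the cursor */
    printf("\033[2J");   /* ED 2: erase the whole screen */
    fflush(stdout);
    return 0;
}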
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Insert or delete characters within the current line. n is +ve if
|
|
|
|
* insertion is desired, and -ve for deletion.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void insch(Terminal *term, int n)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
1999-01-08 13:02:13 +00:00
|
|
|
int dir = (n < 0 ? -1 : +1);
|
2004-10-14 16:42:43 +00:00
|
|
|
int m, j;
|
2013-11-25 19:46:05 +00:00
|
|
|
pos eol;
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
|
|
|
termline *ldata;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
|
|
|
n = (n < 0 ? -n : n);
|
2002-10-22 16:11:33 +00:00
|
|
|
if (n > term->cols - term->curs.x)
|
2019-09-08 19:29:00 +00:00
|
|
|
n = term->cols - term->curs.x;
|
2002-10-22 16:11:33 +00:00
|
|
|
m = term->cols - term->curs.x - n;
|
2013-11-25 19:46:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We must de-highlight the selection if it overlaps any part of
|
|
|
|
* the region affected by this operation, i.e. the region from the
|
|
|
|
* current cursor position to end-of-line, _unless_ the entirety
|
|
|
|
* of the selection is going to be moved to the left or right by
|
|
|
|
* this operation but otherwise unchanged, in which case we can
|
|
|
|
* simply move the highlight with the text.
|
|
|
|
*/
|
|
|
|
eol.y = term->curs.y;
|
|
|
|
eol.x = term->cols;
|
|
|
|
if (poslt(term->curs, term->selend) && poslt(term->selstart, eol)) {
|
|
|
|
pos okstart = term->curs;
|
|
|
|
pos okend = eol;
|
|
|
|
if (dir > 0) {
|
|
|
|
/* Insertion: n characters at EOL will be splatted. */
|
|
|
|
okend.x -= n;
|
|
|
|
} else {
|
|
|
|
/* Deletion: n characters at cursor position will be splatted. */
|
|
|
|
okstart.x += n;
|
|
|
|
}
|
|
|
|
if (posle(okstart, term->selstart) && posle(term->selend, okend)) {
|
|
|
|
/* Selection is contained entirely in the interval
|
|
|
|
* [okstart,okend), so we need only adjust the selection
|
|
|
|
* bounds. */
|
|
|
|
term->selstart.x += dir * n;
|
|
|
|
term->selend.x += dir * n;
|
|
|
|
assert(term->selstart.x >= term->curs.x);
|
|
|
|
assert(term->selstart.x < term->cols);
|
|
|
|
assert(term->selend.x > term->curs.x);
|
|
|
|
assert(term->selend.x <= term->cols);
|
|
|
|
} else {
|
|
|
|
/* Selection is not wholly contained in that interval, so
|
|
|
|
* we must unhighlight it. */
|
|
|
|
deselect(term);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-01-02 16:20:29 +00:00
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
if (dir < 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
check_boundary(term, term->curs.x + n, term->curs.y);
|
2004-10-13 11:50:16 +00:00
|
|
|
ldata = scrlineptr(term->curs.y);
|
2019-03-10 14:39:28 +00:00
|
|
|
check_trust_status(term, ldata);
|
1999-01-08 13:02:13 +00:00
|
|
|
if (dir < 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
for (j = 0; j < m; j++)
|
|
|
|
move_termchar(ldata,
|
|
|
|
ldata->chars + term->curs.x + j,
|
|
|
|
ldata->chars + term->curs.x + j + n);
|
|
|
|
while (n--)
|
|
|
|
copy_termchar(ldata, term->curs.x + m++, &term->erase_char);
|
1999-01-08 13:02:13 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
for (j = m; j-- ;)
|
|
|
|
move_termchar(ldata,
|
|
|
|
ldata->chars + term->curs.x + j + n,
|
|
|
|
ldata->chars + term->curs.x + j);
|
|
|
|
while (n--)
|
|
|
|
copy_termchar(ldata, term->curs.x + n, &term->erase_char);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
}
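insch() is normally reached via the CSI insert/delete-character controls; assuming the usual entry points (ICH, i.e. CSI @, for insertion and DCH, i.e. CSI P, for deletion), this stand-alone demo (not part of terminal.c) exercises both directions on one line.
#include <stdio.h>

int main(void)
{
    printf("abcdefgh\r");   /* write some text, then return to column 1 */
    printf("\033[3@");      /* ICH: insert 3 blank cells at the cursor */
    printf("\033[3P");      /* DCH: delete 3 cells at the cursor again */
    printf("\n");
    return 0;
}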
|
|
|
|
|
2021-02-07 19:59:21 +00:00
|
|
|
static void term_update_raw_mouse_mode(Terminal *term)
|
|
|
|
{
|
|
|
|
bool want_raw = (term->xterm_mouse != 0 && !term->xterm_mouse_forbidden);
|
|
|
|
win_set_raw_mouse_mode(term->win, want_raw);
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
term->win_pointer_shape_pending = true;
|
|
|
|
term->win_pointer_shape_raw = want_raw;
|
|
|
|
term_schedule_update(term);
|
2021-02-07 19:59:21 +00:00
|
|
|
}
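term->xterm_mouse is driven by the DEC private modes 1000/1002/1003 handled further down in toggle_mode(); this helper only translates the current level into the front end's raw-mouse state on the next scheduled update. For reference, a host application would switch the tracking on and off with sequences like this stand-alone demo (not part of terminal.c).
#include <stdio.h>

int main(void)
{
    printf("\033[?1000h");   /* enable click-only mouse tracking */
    printf("\033[?1006h");   /* ask for the SGR extended report format */
    /* ... run, reading mouse reports from standard input ... */
    printf("\033[?1006l");   /* revert to the default report format */
    printf("\033[?1000l");   /* disable mouse tracking again */
    return 0;
}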
|
|
|
|
|
2021-12-13 18:49:45 +00:00
|
|
|
static void term_request_resize(Terminal *term, int cols, int rows)
|
|
|
|
{
|
|
|
|
if (term->cols == cols && term->rows == rows)
|
|
|
|
return; /* don't need to do anything */
|
|
|
|
|
Suspend terminal output while a window resize is pending.
This is the payoff from the last few commits of refactoring. It fixes
the following race-condition bug in terminal application redraw:
* server sends a window-resizing escape sequence
* terminal requests a window resize from the front end
* server sends further escape sequences to perform a redraw of some
full-screen application, which assume that the window resize has
occurred and the window is already its new size
* terminal processes all those sequences in the context of the old
window size, while the front end is still thinking
* window resize completes in the front end and term_size() tells the
terminal it now has its new size, but it's too late, the screen
redraw has made a total mess.
(Perhaps the server might even send its window resize + followup
redraw all in one SSH packet, so that it's all queued in term->inbuf
in one go.)
As far as I can see, handling of this case has been broken more or
less forever in the GTK frontend (where window resizes are inherently
asynchronous due to the way X11 works, and we've never done anything
to compensate for that). On Windows, where window size is changed via
SetWindowPos which is synchronous, it used to work, but broke in
commit d74308e90e3813a (i.e. between 0.74 and 0.75), which made all
the ancillary window updates run on the same delayed-action timer as
ordinary text display.
So, it's time to fix it, and I think now I should be able to fix it in
GTK as well as on Windows.
Now, as soon as we've set the term->win_resize_pending flag (in
response to a resize escape sequence), the next return to the top of
the main loop in term_out will terminate output processing early,
leaving any further terminal data still in the term->inbuf bufchain.
Once we get a term_size() callback from the front end telling us our
new size, we reset term->win_resize_pending, which unblocks output
processing again, and we also queue a toplevel callback to have
another try at term_out() so that it will be unblocked promptly.
To implement this I've changed term->win_resize_pending from a bool
into a three-state enumeration, so that we can tell the difference
between 'pending' in the sense of not yet having sent our resize
request to the frontend, and in the sense of waiting for the frontend
to reply. That way, a window resize from the GUI user at least won't
be mistaken for the response to our resize request if it arrives in
the former state. (It can still be mistaken for one in the latter
case, but if the user is resizing the window at the same time as the
server-side application is doing critically size-dependent redrawing,
I don't think there can be any reasonable expectation of nothing going
wrong.)
As mentioned in the previous commit, some failure modes under X11 (in
particular the window manager process getting wedged in some way) can
result in no response being received to a ConfigureWindow request. In
that situation, it seems to me that we really _shouldn't_ sit there
waiting forever - perhaps it's technically the WM's fault and not
ours, but what kind of X window are you most likely to want to use to
do emergency WM repair? A terminal window, of course, so it would be
exceptionally unhelpful to make any terminal window stop working
completely in this situation! Hence, there's a fallback timeout in
terminal.c, so that if we don't receive a response in _too_ long,
we'll assume one is not forthcoming, and resume processing terminal
data at the old window size. The fallback timeout is set to 5 seconds,
following existing practice in libXt (DEFAULT_WM_TIMEOUT).
2021-12-19 10:37:02 +00:00
|
|
|
term->win_resize_pending = WIN_RESIZE_NEED_SEND;
|
2021-12-13 18:49:45 +00:00
|
|
|
term->win_resize_pending_w = cols;
|
|
|
|
term->win_resize_pending_h = rows;
|
|
|
|
term_schedule_update(term);
|
|
|
|
}
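Within this excerpt, the main remote trigger for term_request_resize() is DECCOLM (private mode 3, handled below in toggle_mode()), which switches between 80 and 132 columns unless remote resizing has been disabled. A stand-alone demo (not part of terminal.c):
#include <stdio.h>

int main(void)
{
    printf("\033[?3h");   /* DECCOLM on: request a 132-column window */
    fflush(stdout);
    /* ... */
    printf("\033[?3l");   /* DECCOLM off: back to 80 columns */
    return 0;
}
Note that, as the code above shows, the resize itself is deferred: the request is recorded as WIN_RESIZE_NEED_SEND and only sent to the front end on the next scheduled update, with further output held back until the front end confirms the new size or a timeout expires.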
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Toggle terminal mode `mode' to state `state'. (`query' indicates
|
|
|
|
* whether the mode is a DEC private one or a normal one.)
|
|
|
|
*/
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that those variables are bool and the value assigned is
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
static void toggle_mode(Terminal *term, int mode, int query, bool state)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2018-11-02 19:23:19 +00:00
|
|
|
if (query == 1) {
|
2019-09-08 19:29:00 +00:00
|
|
|
switch (mode) {
|
|
|
|
case 1: /* DECCKM: application cursor keys */
|
|
|
|
term->app_cursor_keys = state;
|
|
|
|
break;
|
|
|
|
case 2: /* DECANM: VT52 mode */
|
|
|
|
term->vt52_mode = !state;
|
|
|
|
if (term->vt52_mode) {
|
|
|
|
term->blink_is_real = false;
|
|
|
|
term->vt52_bold = false;
|
|
|
|
} else {
|
|
|
|
term->blink_is_real = term->blinktext;
|
|
|
|
}
|
|
|
|
term_schedule_tblink(term);
|
|
|
|
break;
|
|
|
|
case 3: /* DECCOLM: 80/132 columns */
|
|
|
|
deselect(term);
|
2021-12-13 18:49:45 +00:00
|
|
|
if (!term->no_remote_resize)
|
|
|
|
term_request_resize(term, state ? 132 : 80, term->rows);
|
2019-09-08 19:29:00 +00:00
|
|
|
term->reset_132 = state;
|
|
|
|
term->alt_t = term->marg_t = 0;
|
|
|
|
term->alt_b = term->marg_b = term->rows - 1;
|
|
|
|
move(term, 0, 0, 0);
|
|
|
|
erase_lots(term, false, true, true);
|
|
|
|
break;
|
|
|
|
case 5: /* DECSCNM: reverse video */
|
|
|
|
/*
|
|
|
|
* Toggle reverse video. If we receive an OFF within the
|
|
|
|
* visual bell timeout period after an ON, we trigger an
|
|
|
|
* effective visual bell, so that ESC[?5hESC[?5l will
|
|
|
|
* always be an actually _visible_ visual bell.
|
|
|
|
*/
|
|
|
|
if (term->rvideo && !state) {
|
|
|
|
/* This is an OFF, so set up a vbell */
|
|
|
|
term_schedule_vbell(term, true, term->rvbell_startpoint);
|
|
|
|
} else if (!term->rvideo && state) {
|
|
|
|
/* This is an ON, so we notice the time and save it. */
|
|
|
|
term->rvbell_startpoint = GETTICKCOUNT();
|
|
|
|
}
|
|
|
|
term->rvideo = state;
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 6: /* DECOM: DEC origin mode */
|
|
|
|
term->dec_om = state;
|
|
|
|
break;
|
|
|
|
case 7: /* DECAWM: auto wrap */
|
|
|
|
term->wrap = state;
|
Don't set term->wrapnext when not in auto-wrapping mode.
A user sent a transcript from a curses-based tool 'ncmpc', which
carefully disables terminal autowrap when printing a character in the
bottom right corner of the display, and then turns it back on again.
After that, it expects that sending the backspace character really
moves the cursor back a space, instead of clearing the wrapnext flag.
But in PuTTY, we set the wrapnext flag even if we're not in wrapping
mode - it just doesn't _do_ anything when the next character is sent.
But it remains set, and still affects backspace. So the display is
corrupted by this change of expectation.
(Specifically, ncmpc is printing a time display [m:ss] in the very
bottom right, so it disables wrap in order to print the final ']'.
Then the next thing it needs to do is to update the low-order digit of
the seconds field, so it sends \b as the simplest way to get to that
character. The effect on the display is that the updated seconds digit
appears where the ] was, instead of overwriting the old seconds digit.)
This is a tradeoff in desirable behaviours. The point of having a
backspace operation cancel the wrapnext flag and not actually move the
cursor is to preserve the invariant that sending 'x', backspace, 'y'
causes the y to overprint the x, even if that happens near the end of
the terminal's line length. In non-wrapping mode that invariant was
bound to break _eventually_, but with this change, it breaks one
character earlier than before. However, I think that's less bad than
breaking the expectations of curses-based full-screen applications,
especially since the _main_ need for that invariant arises from naïve
applications that don't want to have to think about the terminal width
at all - and those applications generally run in _wrapping_ mode,
where it's possible to continue the invariant across multiple lines in
any case.
2024-08-10 09:38:02 +00:00
|
|
|
if (!term->wrap)
|
|
|
|
term->wrapnext = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 8: /* DECARM: auto key repeat */
|
|
|
|
term->repeat_off = !state;
|
|
|
|
break;
|
|
|
|
case 25: /* DECTCEM: enable/disable cursor */
|
|
|
|
compatibility2(OTHER, VT220);
|
|
|
|
term->cursor_on = state;
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 47: /* alternate screen */
|
|
|
|
compatibility(OTHER);
|
|
|
|
deselect(term);
|
|
|
|
swap_screen(term, term->no_alt_screen ? 0 : state, false, false);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 1000: /* xterm mouse 1 (normal) */
|
|
|
|
term->xterm_mouse = state ? 1 : 0;
|
2021-02-07 19:59:21 +00:00
|
|
|
term_update_raw_mouse_mode(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 1002: /* xterm mouse 2 (inc. button drags) */
|
|
|
|
term->xterm_mouse = state ? 2 : 0;
|
2021-02-07 19:59:21 +00:00
|
|
|
term_update_raw_mouse_mode(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2019-12-20 13:56:58 +00:00
|
|
|
case 1003: /* xterm mouse any-event tracking */
|
|
|
|
term->xterm_mouse = state ? 3 : 0;
|
|
|
|
term_update_raw_mouse_mode(term);
|
|
|
|
break;
|
2019-09-08 19:29:00 +00:00
|
|
|
case 1006: /* xterm extended mouse */
|
|
|
|
term->xterm_extended_mouse = state;
|
|
|
|
break;
|
|
|
|
case 1015: /* urxvt extended mouse */
|
|
|
|
term->urxvt_extended_mouse = state;
|
|
|
|
break;
|
|
|
|
case 1047: /* alternate screen */
|
|
|
|
compatibility(OTHER);
|
|
|
|
deselect(term);
|
|
|
|
swap_screen(term, term->no_alt_screen ? 0 : state, true, true);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 1048: /* save/restore cursor */
|
|
|
|
if (!term->no_alt_screen)
|
|
|
|
save_cursor(term, state);
|
|
|
|
if (!state) seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 1049: /* cursor & alternate screen */
|
|
|
|
if (state && !term->no_alt_screen)
|
|
|
|
save_cursor(term, state);
|
|
|
|
if (!state) seen_disp_event(term);
|
|
|
|
compatibility(OTHER);
|
|
|
|
deselect(term);
|
|
|
|
swap_screen(term, term->no_alt_screen ? 0 : state, true, false);
|
|
|
|
if (!state && !term->no_alt_screen)
|
2003-05-27 09:43:14 +00:00
|
|
|
save_cursor(term, state);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 2004: /* xterm bracketed paste */
|
|
|
|
term->bracketed_paste = state ? true : false;
|
|
|
|
break;
|
2018-11-02 19:23:19 +00:00
|
|
|
}
|
|
|
|
} else if (query == 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
switch (mode) {
|
|
|
|
case 4: /* IRM: set insert mode */
|
|
|
|
compatibility(VT102);
|
|
|
|
term->insert = state;
|
|
|
|
break;
|
|
|
|
case 12: /* SRM: set echo mode */
|
2019-06-17 19:21:06 +00:00
|
|
|
term->srm_echo = !state;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 20: /* LNM: Return sends ... */
|
|
|
|
term->cr_lf_return = state;
|
|
|
|
break;
|
|
|
|
case 34: /* WYULCURM: Make cursor BIG */
|
|
|
|
compatibility2(OTHER, VT220);
|
|
|
|
term->big_cursor = !state;
|
|
|
|
}
|
2018-11-02 19:23:19 +00:00
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
}
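Several of the private modes above combine into the usual prologue and epilogue of a full-screen application; this stand-alone demo (not part of terminal.c) switches to the alternate screen with the cursor saved, enables bracketed paste, and undoes both on exit, with an ESC[?5h/ESC[?5l pair in the middle showing the reverse-video toggle that the handler above turns into a visible flash.
#include <stdio.h>

int main(void)
{
    printf("\033[?1049h");       /* save cursor and enter alternate screen */
    printf("\033[?2004h");       /* enable bracketed paste */

    printf("\033[?5h\033[?5l");  /* reverse video on then off: rendered as
                                  * a visual bell by the case 5 handler */

    printf("\033[?2004l");       /* disable bracketed paste */
    printf("\033[?1049l");       /* leave alternate screen, restore cursor */
    return 0;
}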
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process an OSC sequence: set window title or icon name.
|
|
|
|
*/
|
2002-10-22 16:11:33 +00:00
|
|
|
static void do_osc(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2023-10-19 17:55:04 +00:00
|
|
|
if (term->osc_is_apc) {
|
|
|
|
/* This OSC was really an APC, and we don't support that
|
|
|
|
* sequence at all. We only recognise it in order to ignore it
|
|
|
|
* and filter it out of input. */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->osc_w) {
|
2019-09-08 19:29:00 +00:00
|
|
|
while (term->osc_strlen--)
|
2022-08-03 19:48:46 +00:00
|
|
|
term->wordness[(unsigned char)term->osc_string[term->osc_strlen]] =
|
|
|
|
term->esc_args[0];
|
1999-01-08 13:02:13 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->osc_string[term->osc_strlen] = '\0';
|
|
|
|
switch (term->esc_args[0]) {
|
|
|
|
case 0:
|
|
|
|
case 1:
|
2021-02-07 19:59:20 +00:00
|
|
|
if (!term->no_remote_wintitle) {
|
|
|
|
sfree(term->icon_title);
|
|
|
|
term->icon_title = dupstr(term->osc_string);
|
win_set_[icon_]title: send a codepage along with the string.
While fixing the previous commit I noticed that window titles don't
actually _work_ properly if you change the terminal character set,
because the text accumulated in the OSC string buffer is sent to the
TermWin as raw bytes, with no indication of what character set it
should interpret them as. You might get lucky if you happened to
choose the right charset (in particular, UTF-8 is a common default),
but if you change the charset half way through a run, then there's
certainly no way the frontend will know to interpret two window titles
sent before and after the change in two different charsets.
So, now win_set_title() and win_set_icon_title() both include a
codepage parameter along with the byte string, and it's up to them to
translate the provided window title from that encoding to whatever the
local window system expects to receive.
On Windows, that's wide-string Unicode, so we can just use the
existing dup_mb_to_wc utility function. But in GTK, it's UTF-8, so I
had to write an extra utility function to encode a wide string as
UTF-8.
2021-10-16 12:20:44 +00:00
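A rough standalone sketch of the kind of wide-string-to-UTF-8 helper the
GTK front end needs for this (this is not PuTTY's actual utility
function; it assumes wchar_t holds whole Unicode code points, as on
platforms with 32-bit wchar_t, and it ignores surrogate/error handling):

#include <stdlib.h>
#include <wchar.h>

/* Encode a wide string as UTF-8; caller frees the result. */
char *wide_to_utf8_sketch(const wchar_t *w, size_t len)
{
    char *out = malloc(len * 4 + 1), *p = out;
    if (!out)
        return NULL;
    for (size_t i = 0; i < len; i++) {
        unsigned long c = (unsigned long)w[i];
        if (c < 0x80) {
            *p++ = (char)c;
        } else if (c < 0x800) {
            *p++ = (char)(0xC0 | (c >> 6));
            *p++ = (char)(0x80 | (c & 0x3F));
        } else if (c < 0x10000) {
            *p++ = (char)(0xE0 | (c >> 12));
            *p++ = (char)(0x80 | ((c >> 6) & 0x3F));
            *p++ = (char)(0x80 | (c & 0x3F));
        } else {
            *p++ = (char)(0xF0 | (c >> 18));
            *p++ = (char)(0x80 | ((c >> 12) & 0x3F));
            *p++ = (char)(0x80 | ((c >> 6) & 0x3F));
            *p++ = (char)(0x80 | (c & 0x3F));
        }
    }
    *p = '\0';
    return out;
}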
|
|
|
term->icontitle_codepage = term->ucsdata->line_codepage;
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
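The consuming half of that pattern is outside this excerpt; conceptually
it looks like the following toy sketch (simplified stand-in types and
callbacks, not PuTTY's real term_update):

/* Toy model: the timer-driven update drains every pending window
 * change in one place, so changes happen in the order requested. */
struct toy_term {
    int win_title_pending, win_icon_title_pending;
    const char *window_title, *icon_title;
};

void toy_term_update(struct toy_term *t,
                     void (*set_title)(const char *),
                     void (*set_icon_title)(const char *))
{
    if (t->win_title_pending) {
        set_title(t->window_title);
        t->win_title_pending = 0;
    }
    if (t->win_icon_title_pending) {
        set_icon_title(t->icon_title);
        t->win_icon_title_pending = 0;
    }
}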
|
|
|
term->win_icon_title_pending = true;
|
|
|
|
term_schedule_update(term);
|
2021-02-07 19:59:20 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->esc_args[0] == 1)
|
|
|
|
break;
|
|
|
|
/* fall through: parameter 0 means set both */
|
|
|
|
case 2:
|
|
|
|
case 21:
|
2021-02-07 19:59:20 +00:00
|
|
|
if (!term->no_remote_wintitle) {
|
|
|
|
sfree(term->window_title);
|
|
|
|
term->window_title = dupstr(term->osc_string);
|
2021-10-16 12:20:44 +00:00
|
|
|
term->wintitle_codepage = term->ucsdata->line_codepage;
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_title_pending = true;
|
|
|
|
term_schedule_update(term);
|
2021-02-07 19:59:20 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2017-10-05 19:43:02 +00:00
|
|
|
case 4:
|
|
|
|
if (term->ldisc && !strcmp(term->osc_string, "?")) {
|
Centralise palette setup into terminal.c.
Now terminal.c makes nearly all the decisions about what the colour
palette should actually contain: it does the job of reading the
GUI-configurable colours out of Conf, and also the job of making up
the rest of the xterm-256 palette. The only exception is that TermWin
can provide a method to override some of the default colours, which on
Windows is used to implement the 'Use system colours' config option.
This saves code overall, partly because the front ends don't have to
be able to send palette data back to the Terminal any more (the
Terminal keeps the master copy and can answer palette-query escape
sequences from its own knowledge), and also because now there's only
one copy of the xterm-256 palette setup code (previously gtkwin.c and
window.c each had their own version of it).
In this rewrite, I've also introduced a multi-layered storage system
for the palette data in Terminal. One layer contains the palette
information derived from Conf; the next contains platform overrides
(currently just Windows's 'Use system colours'); the last one contains
overrides set by escape sequences in the middle of the session. The
topmost two layers can each _conditionally_ override the ones below.
As a result, if a server-side application manually resets (say) the
default fg and bg colours in mid-session to something that works well
in a particular application, those changes won't be wiped out by a
change in the Windows system colours or the Conf, which they would
have been before. Instead, changes in Conf or the system colours alter
the lower layers of the structure, but then when palette_rebuild is
called, the upper layer continues to override them, until a palette
reset (ESC]R) or terminal reset (e.g. ESC c) removes those upper-layer
changes. This seems like a more consistent strategy, in that the same
set of configuration settings will produce the same end result
regardless of what order they were applied in.
The palette-related methods in TermWin have had a total rework.
palette_get and palette_reset are both gone; palette_set can now set a
contiguous range of colours in one go; and the new
palette_get_overrides replaces window.c's old systopalette().
2021-02-07 19:59:21 +00:00
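A toy model of that layered lookup (purely illustrative; the real
structures in terminal.c differ): each layer may conditionally override
the one below it, and a rebuild resolves every index by taking the
topmost layer that provides a value.

#include <stdbool.h>

typedef struct { unsigned char r, g, b; } toy_rgb;
typedef struct { toy_rgb colour; bool present; } toy_entry;

enum { LAYER_CONF, LAYER_PLATFORM, LAYER_SESSION, NLAYERS,
       TOY_NCOLOURS = 256 };

/* Resolve one palette index by scanning layers from the top down. */
toy_rgb toy_palette_lookup(const toy_entry layers[NLAYERS][TOY_NCOLOURS],
                           int i)
{
    for (int l = NLAYERS - 1; l >= 0; l--)
        if (layers[l][i].present)
            return layers[l][i].colour;
    return (toy_rgb){0, 0, 0};  /* the conf layer is normally always set */
}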
|
|
|
unsigned index = term->esc_args[1];
|
|
|
|
if (index < OSC4_NCOLOURS) {
|
|
|
|
rgb colour = term->palette[index];
|
2017-10-05 19:43:02 +00:00
|
|
|
char *reply_buf = dupprintf(
|
2021-02-07 19:59:21 +00:00
|
|
|
"\033]4;%u;rgb:%04x/%04x/%04x\007", index,
|
|
|
|
(unsigned)colour.r * 0x0101,
|
|
|
|
(unsigned)colour.g * 0x0101,
|
|
|
|
(unsigned)colour.b * 0x0101);
|
2018-11-02 19:23:19 +00:00
|
|
|
ldisc_send(term->ldisc, reply_buf, strlen(reply_buf),
|
|
|
|
false);
|
2017-10-05 19:43:02 +00:00
|
|
|
sfree(reply_buf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
}
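As a worked example of the OSC 4 reply constructed above: if the
application queries colour 1 and the palette entry is the 8-bit triple
(0xAA, 0x00, 0x00), the terminal answers with 16-bit-per-channel values,
scaling each component by 0x0101 so 0xAA becomes 0xAAAA. The following
standalone snippet (not PuTTY code) reproduces just that formatting:

#include <stdio.h>

int main(void)
{
    unsigned idx = 1, r = 0xAA, g = 0x00, b = 0x00;

    /* Application's query:  ESC ] 4 ; 1 ; ? BEL                     */
    /* Terminal's reply, with 8-bit components scaled up to 16 bits: */
    printf("\033]4;%u;rgb:%04x/%04x/%04x\007",
           idx, r * 0x0101, g * 0x0101, b * 0x0101);
    return 0;
}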
|
|
|
|
|
2002-03-09 17:59:15 +00:00
|
|
|
/*
|
|
|
|
* ANSI printing routines.
|
|
|
|
*/
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
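A toy model of the Conf idea described above (purely illustrative, not
conf.c): every setting is addressed by an integer primary key, and some
keys carry a secondary key, so a setting like the environment-variable
map is a (key, subkey) -> value pair. A linear scan stands in for the
sorted tree234 used by the real thing.

#include <stdio.h>
#include <string.h>

enum { TOY_CONF_HOST, TOY_CONF_PORT, TOY_CONF_ENV };  /* toy keys only */

struct toy_entry {
    int key;               /* which setting */
    const char *subkey;    /* secondary key, or NULL */
    const char *value;
};

const char *toy_conf_get(const struct toy_entry *e, size_t n,
                         int key, const char *subkey)
{
    for (size_t i = 0; i < n; i++)
        if (e[i].key == key &&
            ((!subkey && !e[i].subkey) ||
             (subkey && e[i].subkey && !strcmp(subkey, e[i].subkey))))
            return e[i].value;
    return NULL;
}

int main(void)
{
    const struct toy_entry conf[] = {
        { TOY_CONF_HOST, NULL,   "example.org" },
        { TOY_CONF_PORT, NULL,   "22"          },
        { TOY_CONF_ENV,  "LANG", "C.UTF-8"     },
    };
    printf("%s\n", toy_conf_get(conf, 3, TOY_CONF_ENV, "LANG"));
    return 0;
}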
|
|
|
static void term_print_setup(Terminal *term, char *printer)
|
2002-03-09 17:59:15 +00:00
|
|
|
{
|
2002-10-22 16:11:33 +00:00
|
|
|
bufchain_clear(&term->printer_buf);
|
2011-07-14 18:52:21 +00:00
|
|
|
term->print_job = printer_start_job(printer);
|
2002-03-09 17:59:15 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
static void term_print_flush(Terminal *term)
|
2002-03-09 17:59:15 +00:00
|
|
|
{
|
2019-02-06 20:46:45 +00:00
|
|
|
size_t size;
|
2002-10-22 16:11:33 +00:00
|
|
|
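    /* Note: the flush deliberately holds the last few bytes back; the
     * assumption here is that this lets term_print_finish below inspect
     * and strip a trailing terminator escape sequence rather than
     * sending it on to the printer. */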
while ((size = bufchain_size(&term->printer_buf)) > 5) {
|
2019-09-08 19:29:00 +00:00
|
|
|
ptrlen data = bufchain_prefix(&term->printer_buf);
|
|
|
|
if (data.len > size-5)
|
|
|
|
data.len = size-5;
|
|
|
|
printer_job_data(term->print_job, data.ptr, data.len);
|
|
|
|
bufchain_consume(&term->printer_buf, data.len);
|
2002-03-09 17:59:15 +00:00
|
|
|
}
|
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
static void term_print_finish(Terminal *term)
|
2002-03-09 17:59:15 +00:00
|
|
|
{
|
2019-02-06 20:46:45 +00:00
|
|
|
size_t size;
|
2002-03-09 17:59:15 +00:00
|
|
|
char c;
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (!term->printing && !term->only_printing)
|
2019-09-08 19:29:00 +00:00
|
|
|
return; /* we need do nothing */
|
2002-10-09 18:09:42 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
term_print_flush(term);
|
|
|
|
while ((size = bufchain_size(&term->printer_buf)) > 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
ptrlen data = bufchain_prefix(&term->printer_buf);
|
|
|
|
c = *(char *)data.ptr;
|
|
|
|
if (c == '\033' || c == '\233') {
|
|
|
|
bufchain_consume(&term->printer_buf, size);
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
printer_job_data(term->print_job, &c, 1);
|
|
|
|
bufchain_consume(&term->printer_buf, 1);
|
|
|
|
}
|
2002-03-09 17:59:15 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
printer_finish_job(term->print_job);
|
|
|
|
term->print_job = NULL;
|
2018-10-29 19:50:29 +00:00
|
|
|
term->printing = term->only_printing = false;
|
2002-03-09 17:59:15 +00:00
|
|
|
}
|
|
|
|
|
2018-05-18 13:16:11 +00:00
|
|
|
static void term_display_graphic_char(Terminal *term, unsigned long c)
|
|
|
|
{
|
|
|
|
termline *cline = scrlineptr(term->curs.y);
|
|
|
|
int width = 0;
|
|
|
|
if (DIRECT_CHAR(c))
|
|
|
|
width = 1;
|
|
|
|
if (!width)
|
2019-03-09 16:45:12 +00:00
|
|
|
width = term_char_width(term, c);
|
2018-05-18 13:16:11 +00:00
|
|
|
|
|
|
|
if (term->wrapnext && term->wrap && width > 0) {
|
|
|
|
cline->lattr |= LATTR_WRAPPED;
|
|
|
|
if (term->curs.y == term->marg_b)
|
2018-10-29 19:50:29 +00:00
|
|
|
scroll(term, term->marg_t, term->marg_b, 1, true);
|
2018-05-18 13:16:11 +00:00
|
|
|
else if (term->curs.y < term->rows - 1)
|
|
|
|
term->curs.y++;
|
|
|
|
term->curs.x = 0;
|
2018-10-29 19:50:29 +00:00
|
|
|
term->wrapnext = false;
|
2018-05-18 13:16:11 +00:00
|
|
|
cline = scrlineptr(term->curs.y);
|
|
|
|
}
|
|
|
|
if (term->insert && width > 0)
|
|
|
|
insch(term, width);
|
|
|
|
if (term->selstate != NO_SELECTION) {
|
|
|
|
pos cursplus = term->curs;
|
|
|
|
incpos(cursplus);
|
|
|
|
check_selection(term, term->curs, cursplus);
|
|
|
|
}
|
|
|
|
if (((c & CSET_MASK) == CSET_ASCII ||
|
|
|
|
(c & CSET_MASK) == 0) && term->logctx)
|
|
|
|
logtraffic(term->logctx, (unsigned char) c, LGTYP_ASCII);
|
|
|
|
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
|
|
|
check_trust_status(term, cline);
|
|
|
|
|
|
|
|
int linecols = term->cols;
|
|
|
|
if (cline->trusted)
|
|
|
|
linecols -= TRUST_SIGIL_WIDTH;
|
|
|
|
|
2019-03-14 18:13:01 +00:00
|
|
|
/*
|
2023-03-05 10:04:25 +00:00
|
|
|
* Before we switch on the character width, do a preliminary check for
|
|
|
|
* cases where we might have no room at all to display a double-width
|
|
|
|
* character. Our fallback is to substitute REPLACEMENT CHARACTER,
|
|
|
|
* which is single-width, and it's easiest to do that _before_ having
|
|
|
|
* to 'goto' from one switch case to another.
|
2019-03-14 18:13:01 +00:00
|
|
|
*/
|
2023-03-05 10:04:25 +00:00
|
|
|
if (width == 2 && term->curs.x >= linecols-1) {
|
|
|
|
/*
|
|
|
|
* If we're in wrapping mode and the terminal is at least 2 cells
|
|
|
|
* wide, it's OK, we have a fallback. But otherwise, substitute.
|
|
|
|
*/
|
|
|
|
if (linecols < 2 || !term->wrap) {
|
|
|
|
width = 1;
|
|
|
|
c = 0xFFFD;
|
|
|
|
}
|
2019-03-14 18:13:01 +00:00
|
|
|
}
|
|
|
|
|
2018-05-18 13:16:11 +00:00
|
|
|
switch (width) {
|
|
|
|
case 2:
|
|
|
|
/*
|
2023-03-05 10:04:25 +00:00
|
|
|
* If we're about to display a double-width character starting in
|
|
|
|
* the rightmost column (and we're in wrapping mode - the other
|
|
|
|
* case was disposed of above), then we do something special
|
|
|
|
* instead. We must print a space in the last column of the screen,
|
|
|
|
* then wrap; and we also set LATTR_WRAPPED2 which instructs
|
|
|
|
* subsequent cut-and-pasting not only to splice this line to the
|
|
|
|
* one after it, but to ignore the space in the last character
|
|
|
|
* position as well. (Because what was actually output to the
|
|
|
|
* terminal was presumably just a sequence of CJK characters, and
|
|
|
|
* we don't want a space to be pasted in the middle of those just
|
|
|
|
* because they had the misfortune to start in the wrong parity
|
|
|
|
* column. xterm concurs.)
|
2018-05-18 13:16:11 +00:00
|
|
|
*/
|
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
check_boundary(term, term->curs.x+2, term->curs.y);
|
2019-03-10 14:39:28 +00:00
|
|
|
if (term->curs.x >= linecols-1) {
|
2023-03-05 10:04:25 +00:00
|
|
|
assert(term->wrap); /* we handled the non-wrapping case above */
|
2018-05-18 13:16:11 +00:00
|
|
|
copy_termchar(cline, term->curs.x,
|
|
|
|
&term->erase_char);
|
|
|
|
cline->lattr |= LATTR_WRAPPED | LATTR_WRAPPED2;
|
|
|
|
if (term->curs.y == term->marg_b)
|
|
|
|
scroll(term, term->marg_t, term->marg_b,
|
2018-10-29 19:50:29 +00:00
|
|
|
1, true);
|
2018-05-18 13:16:11 +00:00
|
|
|
else if (term->curs.y < term->rows - 1)
|
|
|
|
term->curs.y++;
|
|
|
|
term->curs.x = 0;
|
|
|
|
cline = scrlineptr(term->curs.y);
|
|
|
|
/* Now we must check_boundary again, of course. */
|
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
check_boundary(term, term->curs.x+2, term->curs.y);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* FULL-TERMCHAR */
|
|
|
|
clear_cc(cline, term->curs.x);
|
|
|
|
cline->chars[term->curs.x].chr = c;
|
|
|
|
cline->chars[term->curs.x].attr = term->curr_attr;
|
|
|
|
cline->chars[term->curs.x].truecolour =
|
|
|
|
term->curr_truecolour;
|
|
|
|
|
|
|
|
term->curs.x++;
|
|
|
|
|
|
|
|
/* FULL-TERMCHAR */
|
|
|
|
clear_cc(cline, term->curs.x);
|
|
|
|
cline->chars[term->curs.x].chr = UCSWIDE;
|
|
|
|
cline->chars[term->curs.x].attr = term->curr_attr;
|
|
|
|
cline->chars[term->curs.x].truecolour =
|
|
|
|
term->curr_truecolour;
|
|
|
|
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
check_boundary(term, term->curs.x+1, term->curs.y);
|
|
|
|
|
|
|
|
/* FULL-TERMCHAR */
|
|
|
|
clear_cc(cline, term->curs.x);
|
|
|
|
cline->chars[term->curs.x].chr = c;
|
|
|
|
cline->chars[term->curs.x].attr = term->curr_attr;
|
|
|
|
cline->chars[term->curs.x].truecolour =
|
|
|
|
term->curr_truecolour;
|
|
|
|
|
|
|
|
break;
|
|
|
|
case 0:
|
|
|
|
if (term->curs.x > 0) {
|
|
|
|
int x = term->curs.x - 1;
|
|
|
|
|
|
|
|
/* If we're in wrapnext state, the character to combine
|
|
|
|
* with is _here_, not to our left. */
|
|
|
|
if (term->wrapnext)
|
|
|
|
x++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the previous character is UCSWIDE, back up another
|
|
|
|
* one.
|
|
|
|
*/
|
|
|
|
if (cline->chars[x].chr == UCSWIDE) {
|
|
|
|
assert(x > 0);
|
|
|
|
x--;
|
|
|
|
}
|
|
|
|
|
|
|
|
add_cc(cline, x, c);
|
|
|
|
seen_disp_event(term);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
term->curs.x++;
|
2019-03-10 14:39:28 +00:00
|
|
|
if (term->curs.x >= linecols) {
|
|
|
|
term->curs.x = linecols - 1;
|
Don't set term->wrapnext when not in auto-wrapping mode.
A user sent a transcript from a curses-based tool 'ncmpc', which
carefully disables terminal autowrap when printing a character in the
bottom right corner of the display, and then turns it back on again.
After that, it expects that sending the backspace character really
moves the cursor back a space, instead of clearing the wrapnext flag.
But in PuTTY, we set the wrapnext flag even if we're not in wrapping
mode - it just doesn't _do_ anything when the next character is sent.
But it remains set, and still affects backspace. So the display is
corrupted by this change of expectation.
(Specifically, ncmpc is printing a time display [m:ss] in the very
bottom right, so it disables wrap in order to print the final ']'.
Then the next thing it needs to do is to update the low-order digit of
the seconds field, so it sends \b as the simplest way to get to that
character. The effect on the display is that the updated seconds digit
appears where the ] was, instead of overwriting the old seconds digit.)
This is a tradeoff in desirable behaviours. The point of having a
backspace operation cancel the wrapnext flag and not actually move the
cursor is to preserve the invariant that sending 'x', backspace, 'y'
causes the y to overprint the x, even if that happens near the end of
the terminal's line length. In non-wrapping mode that invariant was
bound to break _eventually_, but with this change, it breaks one
character earlier than before. However, I think that's less bad than
breaking the expectations of curses-based full-screen applications,
especially since the _main_ need for that invariant arises from naïve
applications that don't want to have to think about the terminal width
at all - and those applications generally run in _wrapping_ mode,
where it's possible to continue the invariant across multiple lines in
any case.
2024-08-10 09:38:02 +00:00
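The scenario can be reproduced with a few standard escape sequences;
here is an illustrative standalone repro (DEC private mode 7 controls
autowrap, and the cursor-positioning line simply relies on CUP clamping
to the bottom-right cell; the exact screen contents are assumed):

#include <stdio.h>

int main(void)
{
    printf("\033[999;999H"); /* move to the bottom-right cell (CUP clamps) */
    printf("\033[?7l");      /* DECRST 7: disable autowrap                 */
    printf("]");             /* print in the very last column              */
    printf("\033[?7h");      /* DECSET 7: re-enable autowrap               */
    printf("\b2");           /* expect '2' to overwrite the cell to the
                              * left of ']'; before this fix it overwrote
                              * the ']' itself                             */
    fflush(stdout);
    return 0;
}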
|
|
|
|
|
|
|
if (term->wrap) {
|
|
|
|
if (!term->vt52_mode) {
|
|
|
|
/* Set the wrapnext flag, so that the next character
|
|
|
|
* wraps, but this one doesn't. */
|
|
|
|
term->wrapnext = true;
|
|
|
|
} else {
|
|
|
|
/* VT52 mode expects simpler handling, and we just
|
|
|
|
* wrap straight away. */
|
|
|
|
cline->lattr |= LATTR_WRAPPED;
|
|
|
|
if (term->curs.y == term->marg_b)
|
|
|
|
scroll(term, term->marg_t, term->marg_b, 1, true);
|
|
|
|
else if (term->curs.y < term->rows - 1)
|
|
|
|
term->curs.y++;
|
|
|
|
term->curs.x = 0;
|
|
|
|
term->wrapnext = false;
|
|
|
|
}
|
2018-05-18 13:16:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
seen_disp_event(term);
|
|
|
|
}
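To illustrate the width-0 branch above: a combining mark sent after a
base character gets attached (via add_cc) to the cell containing that
base character, so the two code points occupy a single cell. A
standalone demonstration, assuming a UTF-8 terminal:

#include <stdio.h>

int main(void)
{
    /* 'e' followed by COMBINING ACUTE ACCENT U+0301 (UTF-8: CC 81).
     * The accent has width 0, so the terminal combines it with the
     * preceding cell instead of advancing the cursor: one 'e'+accent
     * cell. */
    printf("e\xcc\x81\n");
    return 0;
}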
|
|
|
|
|
Refactor terminal input to remove ldiscucs.c.
The functions that previously lived in it now live in terminal.c
itself; they've been renamed term_keyinput and term_keyinputw, and
their function is to add data to the terminal's user input buffer from
a char or wchar_t string respectively.
They sit more comfortably in terminal.c anyway, because their whole
point is to translate into the character encoding that the terminal is
currently configured to use. Also, making them part of the terminal
code means they can also take care of calling term_seen_key_event(),
which simplifies most of the call sites in the GTK and Windows front
ends.
Generation of text _inside_ terminal.c, from responses to query escape
sequences, is therefore not done by calling those external entry
points: we send those responses directly to the ldisc, so that they
don't count as keypresses for all the user-facing purposes like bell
overload handling and scrollback reset. To make _that_ convenient,
I've arranged that most of the code that previously lived in
lpage_send and luni_send is now in separate translation functions, so
those can still be called from situations where you're not going to do
the default thing with the translated data.
(However, pasted data _does_ still count as close enough to a keypress
to call term_seen_key_event - but it clears the 'interactive' flag
when the data is passed on to the line discipline, which tweaks a
minor detail of control-char handling in line ending mode but mostly
just means pastes aren't interrupted.)
2019-06-17 19:13:55 +00:00
|
|
|
static strbuf *term_input_data_from_unicode(
|
2024-09-24 08:37:36 +00:00
|
|
|
Terminal *term, const wchar_t *widebuf, size_t len)
|
2019-06-17 19:13:55 +00:00
|
|
|
{
|
|
|
|
strbuf *buf = strbuf_new();
|
|
|
|
|
|
|
|
if (in_utf(term)) {
|
|
|
|
/*
|
|
|
|
* Translate input wide characters into UTF-8 to go in the
|
|
|
|
* terminal's input data queue.
|
|
|
|
*/
|
2024-09-24 08:37:36 +00:00
|
|
|
for (size_t i = 0; i < len; i++) {
|
2019-06-17 19:13:55 +00:00
|
|
|
unsigned long ch = widebuf[i];
|
|
|
|
|
|
|
|
if (IS_SURROGATE(ch)) {
|
|
|
|
#ifdef PLATFORM_IS_UTF16
|
|
|
|
if (i+1 < len) {
|
|
|
|
unsigned long ch2 = widebuf[i+1];
|
|
|
|
if (IS_SURROGATE_PAIR(ch, ch2)) {
|
|
|
|
ch = FROM_SURROGATES(ch, ch2);
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
/* Unrecognised UTF-16 sequence */
|
|
|
|
ch = '.';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-09 18:56:51 +00:00
|
|
|
put_utf8_char(buf, ch);
|
2019-06-17 19:13:55 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Call to the character-set subsystem to translate into
|
|
|
|
* whatever charset the terminal is currently configured in.
|
|
|
|
*
|
|
|
|
* Since the terminal doesn't currently support any multibyte
|
|
|
|
* character set other than UTF-8, we can assume here that
|
|
|
|
* there will be at most one output byte per input wchar_t.
|
2019-07-02 20:22:01 +00:00
|
|
|
* (But also we must allow space for the trailing NUL that
|
|
|
|
* wc_to_mb will write.)
|
2019-06-17 19:13:55 +00:00
|
|
|
*/
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
put_wc_to_mb(buf, term->ucsdata->line_codepage, widebuf, len, "");
|
2019-06-17 19:13:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return buf;
|
|
|
|
}
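For reference, the arithmetic behind a FROM_SURROGATES-style combination
is the standard UTF-16 decoding step. This standalone snippet (not PuTTY
code) shows it for U+1F600, which a UTF-16 platform delivers as the
surrogate pair 0xD83D 0xDE00:

#include <stdio.h>

int main(void)
{
    unsigned hi = 0xD83D, lo = 0xDE00;   /* surrogate pair for U+1F600 */
    unsigned long ch = 0x10000UL + ((hi - 0xD800) << 10) + (lo - 0xDC00);
    printf("U+%05lX\n", ch);             /* prints U+1F600 */
    return 0;
}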
|
|
|
|
|
|
|
|
static strbuf *term_input_data_from_charset(
|
2024-09-24 08:37:36 +00:00
|
|
|
Terminal *term, int codepage, const char *str, size_t len)
|
2019-06-17 19:13:55 +00:00
|
|
|
{
|
2024-11-21 13:10:22 +00:00
|
|
|
if (codepage < 0) {
|
|
|
|
strbuf *buf = strbuf_new();
|
2019-06-17 19:13:55 +00:00
|
|
|
put_data(buf, str, len);
|
2024-11-21 13:10:22 +00:00
|
|
|
return buf;
|
|
|
|
} else {
|
|
|
|
strbuf *wide = strbuf_new();
|
|
|
|
put_mb_to_wc(wide, codepage, str, len);
|
|
|
|
strbuf *buf = term_input_data_from_unicode(
|
|
|
|
term, (const wchar_t *)wide->s, wide->len / sizeof(wchar_t));
|
|
|
|
strbuf_free(wide);
|
|
|
|
return buf;
|
|
|
|
}
|
2019-06-17 19:13:55 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2019-06-17 19:21:06 +00:00
|
|
|
static inline void term_bracketed_paste_start(Terminal *term)
|
|
|
|
{
|
|
|
|
ptrlen seq = PTRLEN_LITERAL("\033[200~");
|
|
|
|
if (term->ldisc)
|
|
|
|
ldisc_send(term->ldisc, seq.ptr, seq.len, false);
|
|
|
|
term->bracketed_paste_active = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void term_bracketed_paste_stop(Terminal *term)
|
|
|
|
{
|
|
|
|
if (!term->bracketed_paste_active)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ptrlen seq = PTRLEN_LITERAL("\033[201~");
|
|
|
|
if (term->ldisc)
|
|
|
|
ldisc_send(term->ldisc, seq.ptr, seq.len, false);
|
|
|
|
term->bracketed_paste_active = false;
|
|
|
|
}
|
|
|
|
|
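When bracketed paste mode is in use (conventionally switched on by the application with ESC[?2004h), the two helpers above wrap any interactive paste in the ESC[200~ / ESC[201~ markers, so the application can tell pasted bytes from typed ones. As a rough standalone illustration of what the application then reads from its tty (a sketch, not part of terminal.c):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Byte stream as an application might receive it after a paste;
     * the markers match the sequences sent by the helpers above. */
    const char *input = "\033[200~echo hello\n\033[201~";
    const char *start = "\033[200~", *stop = "\033[201~";

    const char *p = strstr(input, start);
    if (p) {
        p += strlen(start);
        const char *q = strstr(p, stop);
        size_t n = q ? (size_t)(q - p) : strlen(p);
        /* The newline is part of the paste, not a request to run anything. */
        printf("pasted %zu bytes: %.*s", n, (int)n, p);
    }
    return 0;
}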
2019-06-17 19:13:55 +00:00
|
|
|
static inline void term_keyinput_internal(
|
|
|
|
Terminal *term, const void *buf, int len, bool interactive)
|
|
|
|
{
|
2019-06-17 19:21:06 +00:00
|
|
|
if (term->srm_echo) {
|
|
|
|
/*
|
|
|
|
* Implement the terminal-level local echo behaviour that
|
|
|
|
* ECMA-48 specifies when terminal mode 12 is configured off
|
|
|
|
* (ESC[12l). In this mode, data input to the terminal via the
|
|
|
|
* keyboard is also added to the output buffer. But this
|
|
|
|
* doesn't apply to escape sequences generated as session
|
|
|
|
* input _within_ the terminal, e.g. in response to terminal
|
|
|
|
* query sequences, or the bracketing sequences of bracketed
|
|
|
|
* paste mode. Those will be sent directly via
|
|
|
|
* ldisc_send(term->ldisc, ...) and won't go through this
|
|
|
|
* function.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Mimic the special case of negative length in ldisc_send */
|
|
|
|
int true_len = len >= 0 ? len : strlen(buf);
|
|
|
|
|
|
|
|
bufchain_add(&term->inbuf, buf, true_len);
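/* Feeding the keystrokes into term->inbuf is what makes them appear
 * on screen (the local echo); they are still passed to the ldisc
 * below in the usual way. */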
|
Proper buffer management between terminal and backend.
The return value of term_data() is used as the return value from the
GUI-terminal versions of the Seat output method, which means backends
will take it to be the amount of standard-output data currently
buffered, and exert back-pressure on the remote peer if it gets too
big (e.g. by ceasing to extend the window in that particular SSH-2
channel).
Historically, as a comment in term_data() explained, we always just
returned 0 from that function, on the basis that we were processing
all the terminal data through our terminal emulation code immediately,
and never retained any of it in the buffer at all. If the terminal
emulation code were to start running slowly, then it would slow down
the _whole_ PuTTY system, due to single-threadedness, and
back-pressure of a sort would be exerted on the remote by it simply
failing to get round to reading from the network socket. But by the
time we got back to the top level of term_data(), we'd have finished
reading all the data we had, so it was still appropriate to return 0.
That comment is still correct if you're thinking about the limiting
factor on terminal data processing being the CPU usage in term_out().
But now that's no longer the whole story, because sometimes we leave
data in term->inbuf without having processed it: during drag-selects
in the terminal window, and (just introduced) while waiting for the
response to a pending window resize request. For both those reasons,
we _don't_ always have a buffer size of zero when we return from
term_data().
So now that hole in our buffer size management is filled in:
term_data() returns the true size of the remaining unprocessed
terminal output, so that back-pressure will be exerted if the terminal
is currently not consuming it. And when processing resumes and we
start to clear our backlog, we call backend_unthrottle to let the
backend know it can relax the back-pressure if necessary.
2021-12-12 10:57:23 +00:00
|
|
|
term_added_data(term, false);
|
2019-06-17 19:21:06 +00:00
|
|
|
}
|
2019-08-03 17:09:05 +00:00
|
|
|
if (interactive)
|
|
|
|
term_bracketed_paste_stop(term);
|
2019-06-17 19:13:55 +00:00
|
|
|
if (term->ldisc)
|
|
|
|
ldisc_send(term->ldisc, buf, len, interactive);
|
|
|
|
term_seen_key_event(term);
|
|
|
|
}
|
|
|
|
|
2019-03-04 20:53:41 +00:00
|
|
|
unsigned long term_translate(
|
|
|
|
Terminal *term, struct term_utf8_decode *utf8, unsigned char c)
|
2019-03-04 20:52:15 +00:00
|
|
|
{
|
|
|
|
if (in_utf(term)) {
|
2019-03-04 20:53:41 +00:00
|
|
|
switch (utf8->state) {
|
2019-03-04 20:52:15 +00:00
|
|
|
case 0:
|
|
|
|
if (c < 0x80) {
|
|
|
|
/* UTF-8 must be stateless so we ignore iso2022. */
|
|
|
|
if (term->ucsdata->unitab_ctrl[c] != 0xFF) {
|
|
|
|
return term->ucsdata->unitab_ctrl[c];
|
|
|
|
} else if ((term->utf8linedraw) &&
|
|
|
|
(term->cset_attr[term->cset] == CSET_LINEDRW)) {
|
|
|
|
/* Linedraw characters are explicitly enabled */
|
|
|
|
return c | CSET_LINEDRW;
|
|
|
|
} else {
|
|
|
|
return c | CSET_ASCII;
|
|
|
|
}
|
|
|
|
} else if ((c & 0xe0) == 0xc0) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->size = utf8->state = 1;
|
|
|
|
utf8->chr = (c & 0x1f);
|
2019-03-04 20:52:15 +00:00
|
|
|
} else if ((c & 0xf0) == 0xe0) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->size = utf8->state = 2;
|
|
|
|
utf8->chr = (c & 0x0f);
|
2019-03-04 20:52:15 +00:00
|
|
|
} else if ((c & 0xf8) == 0xf0) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->size = utf8->state = 3;
|
|
|
|
utf8->chr = (c & 0x07);
|
2019-03-04 20:52:15 +00:00
|
|
|
} else if ((c & 0xfc) == 0xf8) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->size = utf8->state = 4;
|
|
|
|
utf8->chr = (c & 0x03);
|
2019-03-04 20:52:15 +00:00
|
|
|
} else if ((c & 0xfe) == 0xfc) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->size = utf8->state = 5;
|
|
|
|
utf8->chr = (c & 0x01);
|
2019-03-04 20:52:15 +00:00
|
|
|
} else {
|
|
|
|
return UCSINVALID;
|
|
|
|
}
|
|
|
|
return UCSINCOMPLETE;
|
|
|
|
case 1:
|
|
|
|
case 2:
|
|
|
|
case 3:
|
|
|
|
case 4:
|
|
|
|
case 5:
|
|
|
|
if ((c & 0xC0) != 0x80) {
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->state = 0;
|
2019-03-04 20:52:15 +00:00
|
|
|
return UCSTRUNCATED; /* caller will then give us the
|
|
|
|
* same byte again */
|
|
|
|
}
|
2019-03-04 20:53:41 +00:00
|
|
|
utf8->chr = (utf8->chr << 6) | (c & 0x3f);
|
|
|
|
if (--utf8->state)
|
2019-03-04 20:52:15 +00:00
|
|
|
return UCSINCOMPLETE;
|
|
|
|
|
2019-03-04 20:53:41 +00:00
|
|
|
unsigned long t = utf8->chr;
|
2019-03-04 20:52:15 +00:00
|
|
|
|
|
|
|
/* Is somebody trying to be evil! */
|
|
|
|
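/* e.g. the overlong two-byte sequence C0 AF decodes to t == 0x2F ('/'),
 * which the first test rejects: anything below 0x80 should have been
 * sent as a single byte. */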
if (t < 0x80 ||
|
2019-03-04 20:53:41 +00:00
|
|
|
(t < 0x800 && utf8->size >= 2) ||
|
|
|
|
(t < 0x10000 && utf8->size >= 3) ||
|
|
|
|
(t < 0x200000 && utf8->size >= 4) ||
|
|
|
|
(t < 0x4000000 && utf8->size >= 5))
|
2019-03-04 20:52:15 +00:00
|
|
|
return UCSINVALID;
|
|
|
|
|
|
|
|
/* Unicode line separator and paragraph separator are CR-LF */
|
|
|
|
if (t == 0x2028 || t == 0x2029)
|
|
|
|
return 0x85;
|
|
|
|
|
|
|
|
/* High controls are probably a Baaad idea too. */
|
|
|
|
if (t < 0xA0)
|
|
|
|
return 0xFFFD;
|
|
|
|
|
|
|
|
/* The UTF-16 surrogates are not nice either. */
|
2019-09-08 19:29:00 +00:00
|
|
|
/* The standard gives the option of decoding these:
|
2019-03-04 20:52:15 +00:00
|
|
|
* I don't want to! */
|
|
|
|
if (t >= 0xD800 && t < 0xE000)
|
|
|
|
return UCSINVALID;
|
|
|
|
|
|
|
|
/* ISO 10646 characters now limited to UTF-16 range. */
|
|
|
|
if (t > 0x10FFFF)
|
|
|
|
return UCSINVALID;
|
|
|
|
|
|
|
|
/* U+FEFF is best seen as a null. */
|
|
|
|
if (t == 0xFEFF)
|
|
|
|
return UCSINCOMPLETE;
|
|
|
|
/* But U+FFFE and U+FFFF are errors. */
|
|
|
|
if (t == 0xFFFE || t == 0xFFFF)
|
|
|
|
return UCSINVALID;
|
|
|
|
|
|
|
|
return t;
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
} else if (term->sco_acs &&
|
2019-03-04 20:52:15 +00:00
|
|
|
(c!='\033' && c!='\012' && c!='\015' && c!='\b')) {
|
|
|
|
/* Are we in the nasty ACS mode? Note: no sco in utf mode. */
|
|
|
|
if (term->sco_acs == 2)
|
|
|
|
c |= 0x80;
|
|
|
|
|
|
|
|
return c | CSET_SCOACS;
|
|
|
|
} else {
|
|
|
|
switch (term->cset_attr[term->cset]) {
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
2019-03-04 20:52:15 +00:00
|
|
|
* Linedraw characters are different from 'ESC ( B'
|
|
|
|
* only for a small range. For ones outside that
|
|
|
|
* range, make sure we use the same font as well as
|
|
|
|
* the same encoding.
|
|
|
|
*/
|
|
|
|
case CSET_LINEDRW:
|
|
|
|
if (term->ucsdata->unitab_ctrl[c] != 0xFF)
|
|
|
|
return term->ucsdata->unitab_ctrl[c];
|
|
|
|
else
|
|
|
|
return c | CSET_LINEDRW;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CSET_GBCHR:
|
|
|
|
/* If UK-ASCII, make the '#' a LineDraw Pound */
|
|
|
|
if (c == '#')
|
|
|
|
return '}' | CSET_LINEDRW;
|
|
|
|
/* fall through */
|
|
|
|
|
|
|
|
case CSET_ASCII:
|
|
|
|
if (term->ucsdata->unitab_ctrl[c] != 0xFF)
|
|
|
|
return term->ucsdata->unitab_ctrl[c];
|
|
|
|
else
|
|
|
|
return c | CSET_ASCII;
|
|
|
|
break;
|
|
|
|
case CSET_SCOACS:
|
|
|
|
if (c >= ' ')
|
|
|
|
return c | CSET_SCOACS;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
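In the decoder above, utf8->state counts down the continuation bytes still expected and utf8->chr accumulates six bits per continuation byte. A minimal standalone restatement of that accumulation step, decoding U+20AC (the euro sign, encoded as E2 82 AC):

#include <stdio.h>

int main(void)
{
    const unsigned char seq[] = { 0xE2, 0x82, 0xAC };  /* UTF-8 for U+20AC */

    /* Introducer: (0xE2 & 0xf0) == 0xe0, so size = state = 2, chr = 0xE2 & 0x0f. */
    unsigned long chr = seq[0] & 0x0f;
    int state = 2;

    /* Each continuation byte contributes its low six bits. */
    for (int i = 1; state > 0; i++, state--)
        chr = (chr << 6) | (seq[i] & 0x3f);

    printf("decoded U+%04lX\n", chr);   /* prints U+20AC */
    return 0;
}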
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
|
|
|
* Remove everything currently in `inbuf' and stick it up on the
|
|
|
|
* in-memory display. There's a big state machine in here to
|
|
|
|
* process escape sequences...
|
|
|
|
*/
|
2021-12-12 10:57:23 +00:00
|
|
|
static void term_out(Terminal *term, bool called_from_term_data)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
|
|
|
unsigned long c;
|
|
|
|
int unget;
|
2021-12-18 15:07:41 +00:00
|
|
|
const unsigned char *chars;
|
|
|
|
size_t nchars_got = 0, nchars_used = 0;
|
2001-09-18 19:41:07 +00:00
|
|
|
|
2021-12-12 14:39:50 +00:00
|
|
|
/*
|
|
|
|
* During drag-selects, we do not process terminal input, because
|
|
|
|
* the user will want the screen to hold still to be selected.
|
|
|
|
*/
|
|
|
|
if (term->selstate == DRAGGING)
|
|
|
|
return;
|
|
|
|
|
2001-09-18 19:41:07 +00:00
|
|
|
unget = -1;
|
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
chars = NULL; /* placate compiler warnings */
|
2021-12-18 15:07:41 +00:00
|
|
|
while (nchars_got < nchars_used ||
|
|
|
|
unget != -1 ||
|
|
|
|
bufchain_size(&term->inbuf) > 0) {
|
|
|
|
if (unget != -1) {
|
|
|
|
/*
|
|
|
|
* Handle a character we left in 'unget' the last time
|
|
|
|
* round this loop. This happens if a UTF-8 sequence is
|
|
|
|
* aborted early, by containing fewer continuation bytes
|
|
|
|
* than its introducer expected: the non-continuation byte
|
|
|
|
* that interrupted the sequence must now be processed
|
|
|
|
* as a fresh piece of input in its own right.
|
|
|
|
*/
|
|
|
|
c = unget;
|
|
|
|
unget = -1;
|
|
|
|
} else {
|
Suspend terminal output while a window resize is pending.
This is the payoff from the last few commits of refactoring. It fixes
the following race-condition bug in terminal application redraw:
* server sends a window-resizing escape sequence
* terminal requests a window resize from the front end
* server sends further escape sequences to perform a redraw of some
full-screen application, which assume that the window resize has
occurred and the window is already its new size
* terminal processes all those sequences in the context of the old
window size, while the front end is still thinking
* window resize completes in the front end and term_size() tells the
terminal it now has its new size, but it's too late, the screen
redraw has made a total mess.
(Perhaps the server might even send its window resize + followup
redraw all in one SSH packet, so that it's all queued in term->inbuf
in one go.)
As far as I can see, handling of this case has been broken more or
less forever in the GTK frontend (where window resizes are inherently
asynchronous due to the way X11 works, and we've never done anything
to compensate for that). On Windows, where window size is changed via
SetWindowPos which is synchronous, it used to work, but broke in
commit d74308e90e3813a (i.e. between 0.74 and 0.75), which made all
the ancillary window updates run on the same delayed-action timer as
ordinary text display.
So, it's time to fix it, and I think now I should be able to fix it in
GTK as well as on Windows.
Now, as soon as we've set the term->win_resize_pending flag (in
response to a resize escape sequence), the next return to the top of
the main loop in term_out will terminate output processing early,
leaving any further terminal data still in the term->inbuf bufchain.
Once we get a term_size() callback from the front end telling us our
new size, we reset term->win_resize_pending, which unblocks output
processing again, and we also queue a toplevel callback to have
another try at term_out() so that it will be unblocked promptly.
To implement this I've changed term->win_resize_pending from a bool
into a three-state enumeration, so that we can tell the difference
between 'pending' in the sense of not yet having sent our resize
request to the frontend, and in the sense of waiting for the frontend
to reply. That way, a window resize from the GUI user at least won't
be mistaken for the response to our resize request if it arrives in
the former state. (It can still be mistaken for one in the latter
case, but if the user is resizing the window at the same time as the
server-side application is doing critically size-dependent redrawing,
I don't think there can be any reasonable expectation of nothing going
wrong.)
As mentioned in the previous commit, some failure modes under X11 (in
particular the window manager process getting wedged in some way) can
result in no response being received to a ConfigureWindow request. In
that situation, it seems to me that we really _shouldn't_ sit there
waiting forever - perhaps it's technically the WM's fault and not
ours, but what kind of X window are you most likely to want to use to
do emergency WM repair? A terminal window, of course, so it would be
exceptionally unhelpful to make any terminal window stop working
completely in this situation! Hence, there's a fallback timeout in
terminal.c, so that if we don't receive a response in _too_ long,
we'll assume one is not forthcoming, and resume processing terminal
data at the old window size. The fallback timeout is set to 5 seconds,
following existing practice in libXt (DEFAULT_WM_TIMEOUT).
2021-12-19 10:37:02 +00:00
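The flag that implements this is checked at the top of the processing loop below; WIN_RESIZE_NO is the 'nothing in flight' state. A sketch of the three states the commit message describes (the other two names here are illustrative, not necessarily the real identifiers):

typedef enum {
    WIN_RESIZE_NO,           /* no resize in flight: process output freely */
    WIN_RESIZE_NEED_SEND,    /* resize escape sequence seen, request not yet
                              * sent to the front end (hypothetical name) */
    WIN_RESIZE_AWAIT_REPLY   /* request sent; term_out() stops early until
                              * term_size() arrives or the fallback timeout
                              * fires (hypothetical name) */
} WinResizeStateSketch;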
|
|
|
/*
|
|
|
|
* If we're waiting for a terminal resize triggered by an
|
|
|
|
* escape sequence, we defer processing the terminal
|
|
|
|
* output until we receive acknowledgment from the front
|
|
|
|
* end that the resize has happened, so that further
|
|
|
|
* output will be processed in the context of the new
|
|
|
|
* size.
|
|
|
|
*
|
|
|
|
* This test goes inside the main while-loop, so that we
|
|
|
|
* exit early if we encounter a resize escape sequence
|
|
|
|
* part way through term->inbuf.
|
|
|
|
*
|
|
|
|
* It's also in the branch of this if statement that
|
|
|
|
* doesn't deal with a character left in 'unget' by the
|
|
|
|
* previous loop iteration, because if we break out of
|
|
|
|
* this loop with an ungot character still pending, we'll
|
|
|
|
* lose it. (And in any case, if the previous thing that
|
|
|
|
* happened was a truncated UTF-8 sequence, then it won't
|
|
|
|
* have scheduled a pending resize.)
|
|
|
|
*/
|
|
|
|
if (term->win_resize_pending != WIN_RESIZE_NO)
|
|
|
|
break;
|
|
|
|
|
2021-12-18 15:07:41 +00:00
|
|
|
if (nchars_got == nchars_used) {
|
|
|
|
/* Delete the previous chunk from the bufchain */
|
|
|
|
bufchain_consume(&term->inbuf, nchars_used);
|
|
|
|
nchars_used = 0;
|
|
|
|
|
|
|
|
if (bufchain_size(&term->inbuf) == 0)
|
|
|
|
break; /* no more data */
|
|
|
|
|
2019-02-06 20:46:45 +00:00
|
|
|
ptrlen data = bufchain_prefix(&term->inbuf);
|
2021-12-18 15:07:41 +00:00
|
|
|
chars = data.ptr;
|
|
|
|
nchars_got = data.len;
|
2019-09-08 19:29:00 +00:00
|
|
|
assert(chars != NULL);
|
2021-12-18 15:07:41 +00:00
|
|
|
assert(nchars_used < nchars_got);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2021-12-18 15:07:41 +00:00
|
|
|
c = chars[nchars_used++];
|
2019-09-08 19:29:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Optionally log the session traffic to a file. Useful for
|
|
|
|
* debugging and possibly also useful for actual logging.
|
|
|
|
*/
|
|
|
|
if (term->logtype == LGTYP_DEBUG && term->logctx)
|
|
|
|
logtraffic(term->logctx, (unsigned char) c, LGTYP_DEBUG);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that only VT220+ are 8-bit; the VT102 is seven-bit, so it shouldn't even
|
|
|
|
* be able to display 8-bit characters, but I'll let that go 'cause
|
|
|
|
* of i18n.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're printing, add the character to the printer
|
|
|
|
* buffer.
|
|
|
|
*/
|
|
|
|
if (term->printing) {
|
|
|
|
bufchain_add(&term->printer_buf, &c, 1);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're in print-only mode, we use a much simpler
|
|
|
|
* state machine designed only to recognise the ESC[4i
|
|
|
|
* termination sequence.
|
|
|
|
*/
|
|
|
|
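/* For example, the bytes ESC [ 4 i step print_state 0 -> 1 -> 2 -> 3 -> 4,
 * and the 8-bit CSI form \233 4 i jumps straight to 2 and then to 4 the
 * same way; any other byte drops the state back to 0. */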
if (term->only_printing) {
|
|
|
|
if (c == '\033')
|
|
|
|
term->print_state = 1;
|
|
|
|
else if (c == (unsigned char)'\233')
|
|
|
|
term->print_state = 2;
|
|
|
|
else if (c == '[' && term->print_state == 1)
|
|
|
|
term->print_state = 2;
|
|
|
|
else if (c == '4' && term->print_state == 2)
|
|
|
|
term->print_state = 3;
|
|
|
|
else if (c == 'i' && term->print_state == 3)
|
|
|
|
term->print_state = 4;
|
|
|
|
else
|
|
|
|
term->print_state = 0;
|
|
|
|
if (term->print_state == 4) {
|
|
|
|
term_print_finish(term);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do character-set translation. */
|
|
|
|
if (term->termstate == TOPLEVEL) {
|
2019-03-04 20:53:41 +00:00
|
|
|
unsigned long t = term_translate(term, &term->utf8, c);
|
2019-03-04 20:52:15 +00:00
|
|
|
switch (t) {
|
|
|
|
case UCSINCOMPLETE:
|
|
|
|
continue; /* didn't complete a multibyte char */
|
|
|
|
case UCSTRUNCATED:
|
|
|
|
unget = c;
|
|
|
|
/* fall through */
|
|
|
|
case UCSINVALID:
|
|
|
|
c = UCSERR;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
c = t;
|
|
|
|
break;
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* How about C1 controls?
|
|
|
|
* Explicitly ignore SCI (0x9a), which we don't translate to DECID.
|
|
|
|
*/
|
|
|
|
if ((c & -32) == 0x80 && term->termstate < DO_CTRLS &&
|
|
|
|
!term->vt52_mode && has_compat(VT220)) {
|
|
|
|
if (c == 0x9a)
|
|
|
|
c = 0;
|
|
|
|
else {
|
|
|
|
term->termstate = SEEN_ESC;
|
|
|
|
term->esc_query = 0;
|
|
|
|
c = '@' + (c & 0x1F);
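/* e.g. CSI (0x9B) maps to '@' + 0x1B = '[', so from here it is
 * handled exactly as if ESC followed by '[' had been seen. */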
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Or the GL control. */
|
|
|
|
if (c == '\177' && term->termstate < DO_CTRLS && has_compat(OTHER)) {
|
|
|
|
if (term->curs.x && !term->wrapnext)
|
|
|
|
term->curs.x--;
|
|
|
|
term->wrapnext = false;
|
|
|
|
/* destructive backspace might be disabled */
|
|
|
|
if (!term->no_dbackspace) {
|
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
check_boundary(term, term->curs.x+1, term->curs.y);
|
|
|
|
copy_termchar(scrlineptr(term->curs.y),
|
|
|
|
term->curs.x, &term->erase_char);
|
|
|
|
}
|
Further reorganisations of seen_disp_event().
Shortly after the previous commit I spotted another definitely missing
display update: if you send the byte 0x7F, aka 'destructive
backspace', then the display didn't update immediately.
That was two in a row, so I did an eyeball review of the whole
terminal state machine to the best of my ability. Found a couple more
borderline ones, but also, found that the entire VT52 sub-state-
machine had a blanket seen_disp_event which really _shouldn't_ have
been there, because half the VT52 sequences aren't actually display-
modifying updates.
To make this _slightly_ less error-prone, I've sunk a number of
seen_disp_event calls into subroutines that aren't the top-level
term_out(). For example, erase_lots(), scroll(), move() and
swap_screen() now all call seen_disp_event within themselves, so
their call sites don't all have to remember to.
There are probably further bugs after this upheaval, but I think it's
moving in generally the right direction.
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
} else
|
2021-02-07 19:59:20 +00:00
|
|
|
/* Or normal C0 controls. */
|
2019-09-08 19:29:00 +00:00
|
|
|
if ((c & ~0x1F) == 0 && term->termstate < DO_CTRLS) {
|
|
|
|
switch (c) {
|
|
|
|
case '\005': /* ENQ: terminal type query */
|
|
|
|
/*
|
|
|
|
* Strictly speaking this is VT100 but a VT100 defaults to
|
|
|
|
* no response. Other terminals respond at their option.
|
|
|
|
*
|
|
|
|
* Don't put a CR in the default string as this tends to
|
|
|
|
* upset some weird software.
|
|
|
|
*/
|
|
|
|
compatibility(ANSIMIN);
|
|
|
|
if (term->ldisc) {
|
2019-06-17 19:13:55 +00:00
|
|
|
strbuf *buf = term_input_data_from_charset(
|
|
|
|
term, DEFAULT_CODEPAGE,
|
2022-09-13 14:00:26 +00:00
|
|
|
term->answerback->s, term->answerback->len);
|
2020-06-14 09:12:59 +00:00
|
|
|
ldisc_send(term->ldisc, buf->s, buf->len, false);
|
2019-06-17 19:13:55 +00:00
|
|
|
strbuf_free(buf);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
Formatting change to braces around one case of a switch.
Sometimes, within a switch statement, you want to declare local
variables specific to the handler for one particular case. Until now
I've mostly been writing this in the form
switch (discriminant) {
case SIMPLE:
do stuff;
break;
case COMPLICATED:
{
declare variables;
do stuff;
}
break;
}
which is ugly because the two pieces of essentially similar code
appear at different indent levels, and also inconvenient because you
have less horizontal space available to write the complicated case
handler in - particularly undesirable because _complicated_ case
handlers are the ones most likely to need all the space they can get!
After encountering a rather nicer idiom in the LLVM source code, and
after a bit of hackery this morning figuring out how to persuade
Emacs's auto-indent to do what I wanted with it, I've decided to move
to an idiom in which the open brace comes right after the case
statement, and the code within it is indented the same as it would
have been without the brace. Then the whole case handler (including
the break) lives inside those braces, and you get something that looks
more like this:
switch (discriminant) {
case SIMPLE:
do stuff;
break;
case COMPLICATED: {
declare variables;
do stuff;
break;
}
}
This commit is a big-bang change that reformats all the complicated
case handlers I could find into the new layout. This is particularly
nice in the Pageant main function, in which almost _every_ case
handler had a bundle of variables and was long and complicated. (In
fact that's what motivated me to get round to this.) Some of the
innermost parts of the terminal escape-sequence handling are also
breathing a bit easier now the horizontal pressure on them is
relieved.
(Also, in a few cases, I was able to remove the extra braces
completely, because the only variable local to the case handler was a
loop variable which our new C99 policy allows me to move into the
initialiser clause of its for statement.)
Viewed with whitespace ignored, this is not too disruptive a change.
Downstream patches that conflict with it may need to be reapplied
using --ignore-whitespace or similar.
2020-02-16 07:49:52 +00:00
|
|
|
case '\007': { /* BEL: Bell */
|
2022-05-11 19:07:31 +00:00
|
|
|
if (term->termstate == SEEN_OSC ||
|
|
|
|
term->termstate == SEEN_OSC_W) {
|
|
|
|
/*
|
|
|
|
* In an OSC context, BEL is one of the ways to terminate
|
|
|
|
* the whole sequence. We process it as such even if we
|
|
|
|
* haven't got into the final OSC_STRING state yet, so that
|
|
|
|
* OSC sequences without a string will be handled cleanly.
|
|
|
|
*/
|
|
|
|
do_osc(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-02-16 07:49:52 +00:00
|
|
|
struct beeptime *newbeep;
|
|
|
|
unsigned long ticks;
|
|
|
|
|
|
|
|
ticks = GETTICKCOUNT();
|
|
|
|
|
|
|
|
if (!term->beep_overloaded) {
|
2021-02-07 19:59:20 +00:00
|
|
|
newbeep = snew(struct beeptime);
|
|
|
|
newbeep->ticks = ticks;
|
|
|
|
newbeep->next = NULL;
|
|
|
|
if (!term->beephead)
|
|
|
|
term->beephead = newbeep;
|
|
|
|
else
|
|
|
|
term->beeptail->next = newbeep;
|
|
|
|
term->beeptail = newbeep;
|
|
|
|
term->nbeeps++;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
|
2020-02-16 07:49:52 +00:00
|
|
|
/*
|
|
|
|
* Throw out any beeps that happened more than
|
|
|
|
* t seconds ago.
|
|
|
|
*/
|
|
|
|
while (term->beephead &&
|
|
|
|
term->beephead->ticks < ticks - term->bellovl_t) {
|
2021-02-07 19:59:20 +00:00
|
|
|
struct beeptime *tmp = term->beephead;
|
|
|
|
term->beephead = tmp->next;
|
|
|
|
sfree(tmp);
|
|
|
|
if (!term->beephead)
|
|
|
|
term->beeptail = NULL;
|
|
|
|
term->nbeeps--;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
|
2020-02-16 07:49:52 +00:00
|
|
|
if (term->bellovl && term->beep_overloaded &&
|
|
|
|
ticks - term->lastbeep >= (unsigned)term->bellovl_s) {
|
2021-02-07 19:59:20 +00:00
|
|
|
/*
|
|
|
|
* If we're currently overloaded and the
|
|
|
|
* last beep was more than s seconds ago,
|
|
|
|
* leave overload mode.
|
|
|
|
*/
|
|
|
|
term->beep_overloaded = false;
|
2020-02-16 07:49:52 +00:00
|
|
|
} else if (term->bellovl && !term->beep_overloaded &&
|
|
|
|
term->nbeeps >= term->bellovl_n) {
|
2021-02-07 19:59:20 +00:00
|
|
|
/*
|
|
|
|
* Now, if we have n or more beeps
|
|
|
|
* remaining in the queue, go into overload
|
|
|
|
* mode.
|
|
|
|
*/
|
|
|
|
term->beep_overloaded = true;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
|
|
|
term->lastbeep = ticks;
|
2019-09-08 19:29:00 +00:00
|
|
|
|
2020-02-16 07:49:52 +00:00
|
|
|
/*
|
|
|
|
* Perform an actual beep if we're not overloaded.
|
|
|
|
*/
|
|
|
|
if (!term->bellovl || !term->beep_overloaded) {
|
2021-02-07 19:59:20 +00:00
|
|
|
win_bell(term->win, term->beep);
|
2019-09-08 19:29:00 +00:00
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
if (term->beep == BELL_VISUAL) {
|
|
|
|
term_schedule_vbell(term, false, 0);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2020-02-16 07:49:52 +00:00
|
|
|
seen_disp_event(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
case '\b': /* BS: Back space */
|
Fix behaviour of backspace in a 1-column terminal.
This is the first bug found as a direct result of writing that
terminal test program - I added some tests for things I expected to
work already, and some of them didn't, proving immediately that it was
a good idea!
If the terminal is one column wide, and you've printed a
character (hence, set the wrapnext flag), what should backspace do?
Surely it should behave like any other backspace with wrapnext set,
i.e. clear the wrapnext flag, returning the cursor's _logical_
position to the location of the most recently printed character. But
in fact it was anti-wrapping to the previous line, because I'd got the
cases in the wrong order in the if-else chain that forms the backspace
handler. So the handler for 'we're in column 0, wrapping time' was
coming before 'wrapnext is set, just clear it'.
Now wrapnext is checked _first_, before checking anything at all. Any
time we can just clear that, we should.
2023-03-05 10:01:36 +00:00
|
|
|
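            /* Note: wrapnext is deliberately checked first, so that a
             * backspace immediately after an autowrap (even in a
             * one-column terminal) just cancels the pending wrap
             * instead of anti-wrapping to the previous line. */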
            if (term->wrapnext) {
                term->wrapnext = false;
            } else if (term->curs.x == 0 &&
                       (term->curs.y == 0 || !term->wrap)) {
                /* do nothing */
            } else if (term->curs.x == 0 && term->curs.y > 0) {
                term->curs.x = term->cols - 1, term->curs.y--;

                /*
                 * If the line we've just wrapped back on to had the
                 * LATTR_WRAPPED2 flag set, it means that the line wrapped
                 * because a double-width character was printed with the
                 * cursor in the rightmost column, and the best handling
                 * available was to leave that column empty and move the
                 * whole character to the next line. In that situation,
                 * backspacing needs to put the cursor on the previous
                 * _logical_ character, i.e. skip the empty space left by
                 * the wrapping. This arranges that if an application
                 * unaware of the terminal width or cursor position prints
                 * a number of printing characters and then tries to return
                 * to a particular one of them by emitting the right number
                 * of backspaces, it's still the right number even if a
                 * line break appeared in a maximally awkward position.
                 */
                termline *ldata = scrlineptr(term->curs.y);
                if (term->curs.x > 0 && (ldata->lattr & LATTR_WRAPPED2))
                    term->curs.x--;
            } else {
                term->curs.x--;
            }
            seen_disp_event(term);
            break;
          case '\016':          /* LS1: Locking-shift one */
            compatibility(VT100);
            term->cset = 1;
            break;
          case '\017':          /* LS0: Locking-shift zero */
            compatibility(VT100);
            term->cset = 0;
            break;
          case '\033':          /* ESC: Escape */
            if (term->vt52_mode)
                term->termstate = VT52_ESC;
            else if (term->termstate == SEEN_OSC ||
                     term->termstate == SEEN_OSC_W) {
                /* Be prepared to terminate an OSC early */
                term->termstate = OSC_MAYBE_ST;
            } else {
                compatibility(ANSIMIN);
                term->termstate = SEEN_ESC;
                term->esc_query = 0;
            }
            break;
          case '\015':          /* CR: Carriage return */
            term->curs.x = 0;
            term->wrapnext = false;
            seen_disp_event(term);
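            /* With the 'implicit LF in every CR' option (term->crhaslf),
             * a carriage return also moves the cursor down a line,
             * scrolling at the bottom margin if necessary. */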
            if (term->crhaslf) {
                if (term->curs.y == term->marg_b)
                    scroll(term, term->marg_t, term->marg_b, 1, true);
                else if (term->curs.y < term->rows - 1)
                    term->curs.y++;
            }
            if (term->logctx)
                logtraffic(term->logctx, (unsigned char) c, LGTYP_ASCII);
            break;
          case '\014':          /* FF: Form feed */
            if (has_compat(SCOANSI)) {
                move(term, 0, 0, 0);
                erase_lots(term, false, false, true);
                if (term->scroll_on_disp)
                    term->disptop = 0;
                term->wrapnext = false;
                seen_disp_event(term);
                break;
            }
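            /* In non-SCOANSI modes FF is not handled above, and falls
             * through to be treated like VT / LF. */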
          case '\013':          /* VT: Line tabulation */
            compatibility(VT100);
          case '\012':          /* LF: Line feed */
            if (term->curs.y == term->marg_b)
                scroll(term, term->marg_t, term->marg_b, 1, true);
            else if (term->curs.y < term->rows - 1)
                term->curs.y++;
            if (term->lfhascr)
                term->curs.x = 0;
            term->wrapnext = false;
            seen_disp_event(term);
            if (term->logctx)
                logtraffic(term->logctx, (unsigned char) c, LGTYP_ASCII);
            break;
          case '\t': {          /* HT: Character tabulation */
            pos old_curs = term->curs;
            termline *ldata = scrlineptr(term->curs.y);
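
            /* Advance the cursor to the next tab stop, never moving
             * past the last column. */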
            do {
                term->curs.x++;
            } while (term->curs.x < term->cols - 1 &&
                     !term->tabs[term->curs.x]);

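            /* Double-width and double-height lines only have half as
             * many usable columns, so clamp the cursor accordingly. */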
            if ((ldata->lattr & LATTR_MODE) != LATTR_NORM) {
                if (term->curs.x >= term->cols / 2)
                    term->curs.x = term->cols / 2 - 1;
            } else {
                if (term->curs.x >= term->cols)
                    term->curs.x = term->cols - 1;
            }

            check_selection(term, old_curs, term->curs);
            seen_disp_event(term);
            break;
          }
        }
    } else
        switch (term->termstate) {
          case TOPLEVEL:
            /* Only graphic characters get this far;
             * ctrls are stripped above */
            term_display_graphic_char(term, c);
            term->last_graphic_char = c;
            break;

          case OSC_MAYBE_ST:
            /*
             * This state is virtually identical to SEEN_ESC, with the
             * exception that we have an OSC sequence in the pipeline,
             * and _if_ we see a backslash, we process it.
             */
            if (c == '\\') {
                do_osc(term);
                term->termstate = TOPLEVEL;
                break;
            }
            /* else fall through */
          case SEEN_ESC:
            if (c >= ' ' && c <= '/') {
                if (term->esc_query)
                    term->esc_query = -1;
                else
                    term->esc_query = c;
                break;
            }
            term->termstate = TOPLEVEL;
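            /* term->esc_query holds the single intermediate byte
             * collected above (or -1 if more than one was seen);
             * ANSI() combines it with the final byte for dispatch. */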
            switch (ANSI(c, term->esc_query)) {
              case '[':         /* enter CSI mode */
                term->termstate = SEEN_CSI;
                term->esc_nargs = 1;
                term->esc_args[0] = ARG_DEFAULT;
                term->esc_query = 0;
                break;
              case ']':         /* OSC: xterm escape sequences */
                /* Compatibility is nasty here, xterm, linux, decterm yuk! */
                compatibility(OTHER);
                term->termstate = SEEN_OSC;
                term->osc_is_apc = false;
                term->osc_strlen = 0;
                term->esc_args[0] = 0;
                term->esc_nargs = 1;
                break;
              case 'X':         /* SOS: Start of String */
              case '^':         /* PM: privacy message */
              case '_':         /* APC: application program command */
                /* SOS, PM, and APC sequences are just a string, terminated by
                 * ST or (I've observed in practice for APC) ^G. That is,
                 * they have the same termination convention as
                 * OSC. So we handle them by going straight into
                 * OSC_STRING state and setting a flag indicating
                 * that it's not really an OSC. */
                compatibility(OTHER);
                term->termstate = SEEN_OSC;
                term->osc_is_apc = true;
                term->osc_strlen = 0;
                term->esc_args[0] = 0;
                term->esc_nargs = 1;
                break;
              case '7':         /* DECSC: save cursor */
                compatibility(VT100);
                save_cursor(term, true);
                break;
              case '8':         /* DECRC: restore cursor */
                compatibility(VT100);
                save_cursor(term, false);
                break;
              case '=':         /* DECKPAM: Keypad application mode */
                compatibility(VT100);
                term->app_keypad_keys = true;
                break;
              case '>':         /* DECKPNM: Keypad numeric mode */
                compatibility(VT100);
                term->app_keypad_keys = false;
                break;
              case 'D':         /* IND: exactly equivalent to LF */
                compatibility(VT100);
                if (term->curs.y == term->marg_b)
                    scroll(term, term->marg_t, term->marg_b, 1, true);
                else if (term->curs.y < term->rows - 1)
                    term->curs.y++;
                term->wrapnext = false;
                seen_disp_event(term);
                break;
              case 'E':         /* NEL: exactly equivalent to CR-LF */
                compatibility(VT100);
                term->curs.x = 0;
                if (term->curs.y == term->marg_b)
                    scroll(term, term->marg_t, term->marg_b, 1, true);
                else if (term->curs.y < term->rows - 1)
                    term->curs.y++;
                term->wrapnext = false;
                seen_disp_event(term);
                break;
              case 'M':         /* RI: reverse index - backwards LF */
                compatibility(VT100);
                if (term->curs.y == term->marg_t)
                    scroll(term, term->marg_t, term->marg_b, -1, true);
                else if (term->curs.y > 0)
                    term->curs.y--;
                term->wrapnext = false;
                seen_disp_event(term);
                break;
              case 'Z':         /* DECID: terminal type query */
                compatibility(VT100);
                if (term->ldisc)
                    ldisc_send(term->ldisc, term->id_string,
                               strlen(term->id_string), false);
                break;
              case 'c':         /* RIS: restore power-on settings */
                compatibility(VT100);
                power_on(term, true);
                if (term->ldisc)  /* cause ldisc to notice changes */
                    ldisc_echoedit_update(term->ldisc);
                if (term->reset_132) {
                    if (!term->no_remote_resize)
                        term_request_resize(term, 80, term->rows);
                    term->reset_132 = false;
                }
                if (term->scroll_on_disp)
                    term->disptop = 0;
                seen_disp_event(term);
                break;
              case 'H':         /* HTS: set a tab */
                compatibility(VT100);
                term->tabs[term->curs.x] = true;
                break;

              case ANSI('8', '#'): {  /* DECALN: fills screen with Es :-) */
                compatibility(VT100);
                termline *ldata;
                int i, j;
                pos scrtop, scrbot;

                for (i = 0; i < term->rows; i++) {
                    ldata = scrlineptr(i);
                    check_line_size(term, ldata);
                    for (j = 0; j < term->cols; j++) {
                        copy_termchar(ldata, j,
                                      &term->basic_erase_char);
                        ldata->chars[j].chr = 'E';
                    }
                    ldata->lattr = LATTR_NORM;
                }
                if (term->scroll_on_disp)
                    term->disptop = 0;
                seen_disp_event(term);
                scrtop.x = scrtop.y = 0;
                scrbot.x = 0;
                scrbot.y = term->rows;
                check_selection(term, scrtop, scrbot);
                break;
              }

              case ANSI('3', '#'):
              case ANSI('4', '#'):
              case ANSI('5', '#'):
              case ANSI('6', '#'): {
                compatibility(VT100);
                int nlattr;
                termline *ldata;

                switch (ANSI(c, term->esc_query)) {
                  case ANSI('3', '#'): /* DECDHL: 2*height, top */
                    nlattr = LATTR_TOP;
                    break;
                  case ANSI('4', '#'): /* DECDHL: 2*height, bottom */
                    nlattr = LATTR_BOT;
                    break;
                  case ANSI('5', '#'): /* DECSWL: normal */
                    nlattr = LATTR_NORM;
                    break;
                  default: /* case ANSI('6', '#'): DECDWL: 2*width */
                    nlattr = LATTR_WIDE;
                    break;
                }
                ldata = scrlineptr(term->curs.y);
                check_line_size(term, ldata);
                check_trust_status(term, ldata);
                ldata->lattr = nlattr;
                seen_disp_event(term);
                break;
              }
              /* GZD4: G0 designate 94-set */
              case ANSI('A', '('):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[0] = CSET_GBCHR;
                break;
              case ANSI('B', '('):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[0] = CSET_ASCII;
                break;
              case ANSI('0', '('):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[0] = CSET_LINEDRW;
                break;
              case ANSI('U', '('):
                compatibility(OTHER);
                if (!term->no_remote_charset)
                    term->cset_attr[0] = CSET_SCOACS;
                break;
              /* G1D4: G1-designate 94-set */
              case ANSI('A', ')'):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[1] = CSET_GBCHR;
                break;
              case ANSI('B', ')'):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[1] = CSET_ASCII;
                break;
              case ANSI('0', ')'):
                compatibility(VT100);
                if (!term->no_remote_charset)
                    term->cset_attr[1] = CSET_LINEDRW;
                break;
              case ANSI('U', ')'):
                compatibility(OTHER);
                if (!term->no_remote_charset)
                    term->cset_attr[1] = CSET_SCOACS;
                break;
              /* DOCS: Designate other coding system */
              case ANSI('8', '%'):  /* Old Linux code */
              case ANSI('G', '%'):
                compatibility(OTHER);
                if (!term->no_remote_charset)
                    term->utf = true;
                break;
              case ANSI('@', '%'):
                compatibility(OTHER);
                if (!term->no_remote_charset)
                    term->utf = false;
                break;
            }
            break;
          case SEEN_CSI:
            term->termstate = TOPLEVEL;  /* default */
            if (isdigit(c)) {
                if (term->esc_nargs <= ARGS_MAX) {
                    if (term->esc_args[term->esc_nargs - 1] == ARG_DEFAULT)
                        term->esc_args[term->esc_nargs - 1] = 0;
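                    /* Accumulate the decimal argument digit by digit,
                     * saturating at UINT_MAX instead of overflowing. */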
                    if (term->esc_args[term->esc_nargs - 1] <=
                        UINT_MAX / 10 &&
                        term->esc_args[term->esc_nargs - 1] * 10 <=
                        UINT_MAX - c - '0')
                        term->esc_args[term->esc_nargs - 1] =
                            10 * term->esc_args[term->esc_nargs - 1] +
                            c - '0';
                    else
                        term->esc_args[term->esc_nargs - 1] = UINT_MAX;
                }
                term->termstate = SEEN_CSI;
            } else if (c == ';') {
                if (term->esc_nargs < ARGS_MAX)
                    term->esc_args[term->esc_nargs++] = ARG_DEFAULT;
                term->termstate = SEEN_CSI;
            } else if (c < '@') {
                if (term->esc_query)
                    term->esc_query = -1;
                else if (c == '?')
                    term->esc_query = 1;
                else
                    term->esc_query = c;
                term->termstate = SEEN_CSI;
            } else
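/* Clamp an escape-sequence argument to a screen-related bound before it
 * is used, so oversized values can't cause huge moves or repeat counts. */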
#define CLAMP(arg, lim) ((arg) = ((arg) > (lim)) ? (lim) : (arg))
|
|
|
|
switch (ANSI(c, term->esc_query)) {
|
|
|
|
case 'A': /* CUU: move up N lines */
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
move(term, term->curs.x,
|
|
|
|
term->curs.y - def(term->esc_args[0], 1), 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'e': /* VPR: move down N lines */
|
|
|
|
compatibility(ANSI);
|
|
|
|
/* FALLTHROUGH */
|
|
|
|
case 'B': /* CUD: Cursor down */
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
move(term, term->curs.x,
|
|
|
|
term->curs.y + def(term->esc_args[0], 1), 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'b': /* REP: repeat previous grap */
|
|
|
|
CLAMP(term->esc_args[0], term->rows * term->cols);
|
|
|
|
if (term->last_graphic_char) {
|
|
|
|
unsigned i;
|
|
|
|
for (i = 0; i < term->esc_args[0]; i++)
|
|
|
|
term_display_graphic_char(
|
|
|
|
term, term->last_graphic_char);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('c', '>'): /* DA: report xterm version */
|
|
|
|
compatibility(OTHER);
|
|
|
|
/* this reports xterm version 136 so that VIM can
|
|
|
|
use the drag messages from the mouse reporting */
|
|
|
|
if (term->ldisc)
|
|
|
|
ldisc_send(term->ldisc, "\033[>0;136;0c", 11,
|
|
|
|
false);
|
|
|
|
break;
|
|
|
|
case 'a': /* HPR: move right N cols */
|
|
|
|
compatibility(ANSI);
|
|
|
|
/* FALLTHROUGH */
|
|
|
|
case 'C': /* CUF: Cursor right */
|
|
|
|
CLAMP(term->esc_args[0], term->cols);
|
|
|
|
move(term, term->curs.x + def(term->esc_args[0], 1),
|
|
|
|
term->curs.y, 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'D': /* CUB: move left N cols */
|
|
|
|
CLAMP(term->esc_args[0], term->cols);
|
|
|
|
move(term, term->curs.x - def(term->esc_args[0], 1),
|
|
|
|
term->curs.y, 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'E': /* CNL: move down N lines and CR */
|
|
|
|
compatibility(ANSI);
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
move(term, 0,
|
|
|
|
term->curs.y + def(term->esc_args[0], 1), 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'F': /* CPL: move up N lines and CR */
|
|
|
|
compatibility(ANSI);
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
move(term, 0,
|
|
|
|
term->curs.y - def(term->esc_args[0], 1), 1);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'G': /* CHA */
|
|
|
|
case '`': /* HPA: set horizontal posn */
|
|
|
|
compatibility(ANSI);
|
|
|
|
CLAMP(term->esc_args[0], term->cols);
|
|
|
|
move(term, def(term->esc_args[0], 1) - 1,
|
|
|
|
term->curs.y, 0);
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'd': /* VPA: set vertical posn */
|
|
|
|
compatibility(ANSI);
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
move(term, term->curs.x,
|
|
|
|
((term->dec_om ? term->marg_t : 0) +
|
|
|
|
def(term->esc_args[0], 1) - 1),
|
|
|
|
(term->dec_om ? 2 : 0));
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
|
|
|
|
case 'H': /* CUP */
|
|
|
|
case 'f': /* HVP: set horz and vert posns at once */
|
|
|
|
if (term->esc_nargs < 2)
|
|
|
|
term->esc_args[1] = ARG_DEFAULT;
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
CLAMP(term->esc_args[1], term->cols);
|
|
|
|
move(term, def(term->esc_args[1], 1) - 1,
|
|
|
|
((term->dec_om ? term->marg_t : 0) +
|
|
|
|
def(term->esc_args[0], 1) - 1),
|
|
|
|
(term->dec_om ? 2 : 0));
|
|
|
|
seen_disp_event(term);
|
|
|
|
break;
          case 'J': {     /* ED: erase screen or parts of it */
            unsigned int i = def(term->esc_args[0], 0);
            if (i == 3) {
                /* Erase Saved Lines (xterm)
                 * This follows Thomas Dickey's xterm. */
                if (!term->no_remote_clearscroll)
                    term_clrsb(term);
            } else {
                i++;
                if (i > 3)
                    i = 0;
                erase_lots(term, false, !!(i & 2), !!(i & 1));
            }
            if (term->scroll_on_disp)
                term->disptop = 0;
            seen_disp_event(term);
            break;
          }
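          /*
           * EL argument summary: ESC [ 0 K (or ESC [ K) erases from the
           * cursor to the end of the line, ESC [ 1 K from the start of
           * the line to the cursor, and ESC [ 2 K the whole line.
           */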
          case 'K': {     /* EL: erase line or parts of it */
            unsigned int i = def(term->esc_args[0], 0) + 1;
            if (i > 3)
                i = 0;
            erase_lots(term, true, !!(i & 2), !!(i & 1));
            seen_disp_event(term);
            break;
          }
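          /*
           * IL and DL insert or delete blank lines at the cursor and
           * scroll the rest of the scrolling region to compensate, e.g.
           * ESC [ 2 L inserts two blank lines at the cursor's row. Both
           * are ignored when the cursor is below the bottom margin.
           */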
          case 'L':       /* IL: insert lines */
            compatibility(VT102);
            CLAMP(term->esc_args[0], term->rows);
            if (term->curs.y <= term->marg_b)
                scroll(term, term->curs.y, term->marg_b,
                       -def(term->esc_args[0], 1), false);
            seen_disp_event(term);
            break;
          case 'M':       /* DL: delete lines */
            compatibility(VT102);
            CLAMP(term->esc_args[0], term->rows);
            if (term->curs.y <= term->marg_b)
                scroll(term, term->curs.y, term->marg_b,
                       def(term->esc_args[0], 1),
                       true);
            seen_disp_event(term);
            break;
          case '@':       /* ICH: insert chars */
            /* XXX VTTEST says this is vt220, vt510 manual says vt102 */
            compatibility(VT102);
            CLAMP(term->esc_args[0], term->cols);
            insch(term, def(term->esc_args[0], 1));
            seen_disp_event(term);
            break;
          case 'P':       /* DCH: delete chars */
            compatibility(VT102);
            CLAMP(term->esc_args[0], term->cols);
            insch(term, -def(term->esc_args[0], 1));
            seen_disp_event(term);
            break;
          case 'c':       /* DA: terminal type query */
            compatibility(VT100);
            /* This is the response for a VT102 */
            if (term->ldisc)
                ldisc_send(term->ldisc, term->id_string,
                           strlen(term->id_string), false);
            break;
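          /*
           * DSR examples: the host sends ESC [ 6 n to request the
           * cursor position and gets back ESC [ row ; col R (1-based);
           * ESC [ 5 n is a status query, answered with ESC [ 0 n
           * ("terminal OK").
           */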
          case 'n':       /* DSR: cursor position query */
            if (term->ldisc) {
                if (term->esc_args[0] == 6) {
                    char buf[32];
                    sprintf(buf, "\033[%d;%dR", term->curs.y + 1,
                            term->curs.x + 1);
                    ldisc_send(term->ldisc, buf, strlen(buf),
                               false);
                } else if (term->esc_args[0] == 5) {
                    ldisc_send(term->ldisc, "\033[0n", 4, false);
                }
            }
            break;
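          /*
           * SM/RM set and reset modes; the ? prefix selects DEC private
           * modes, e.g. ESC [ ? 25 l is the common DECTCEM sequence to
           * hide the cursor and ESC [ ? 25 h shows it again. The
           * individual mode numbers are interpreted in toggle_mode().
           */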
          case 'h':       /* SM: toggle modes to high */
          case ANSI_QUE('h'):
            compatibility(VT100);
            for (int i = 0; i < term->esc_nargs; i++)
                toggle_mode(term, term->esc_args[i],
                            term->esc_query, true);
            break;
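          /*
           * MC (media copy) drives printer passthrough: ESC [ 5 i
           * starts sending subsequent output to the configured printer
           * and ESC [ 4 i stops. With the ? prefix the output is still
           * displayed on screen as well as being printed.
           */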
          case 'i':       /* MC: Media copy */
          case ANSI_QUE('i'): {
            compatibility(VT100);
            char *printer;
            if (term->esc_nargs != 1) break;
            if (term->esc_args[0] == 5 &&
                (printer = conf_get_str(term->conf,
                                        CONF_printer))[0]) {
                term->printing = true;
                term->only_printing = !term->esc_query;
                term->print_state = 0;
                term_print_setup(term, printer);
            } else if (term->esc_args[0] == 4 &&
                       term->printing) {
                term_print_finish(term);
            }
            break;
          }
          case 'l':       /* RM: toggle modes to low */
          case ANSI_QUE('l'):
            compatibility(VT100);
            for (int i = 0; i < term->esc_nargs; i++)
                toggle_mode(term, term->esc_args[i],
                            term->esc_query, false);
            break;
          case 'g':       /* TBC: clear tabs */
            compatibility(VT100);
            if (term->esc_nargs == 1) {
                if (term->esc_args[0] == 0) {
                    term->tabs[term->curs.x] = false;
                } else if (term->esc_args[0] == 3) {
                    int i;
                    for (i = 0; i < term->cols; i++)
                        term->tabs[i] = false;
                }
            }
            break;
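          /*
           * DECSTBM example: ESC [ 5 ; 20 r restricts scrolling to rows
           * 5-20 and homes the cursor; ESC [ r with no arguments
           * restores the full-screen scrolling region.
           */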
          case 'r':       /* DECSTBM: set scroll margins */
            compatibility(VT100);
            if (term->esc_nargs <= 2) {
                int top, bot;
                CLAMP(term->esc_args[0], term->rows);
                CLAMP(term->esc_args[1], term->rows);
                top = def(term->esc_args[0], 1) - 1;
                bot = (term->esc_nargs <= 1
                       || term->esc_args[1] == 0 ?
                       term->rows :
                       def(term->esc_args[1], term->rows)) - 1;
                if (bot >= term->rows)
                    bot = term->rows - 1;
                /* VTTEST Bug 9 - if the region is less than 2 lines,
                 * don't change the region.
                 */
                if (bot - top > 0) {
                    term->marg_t = top;
                    term->marg_b = bot;
                    term->curs.x = 0;
                    /*
                     * I used to think the cursor should be
                     * placed at the top of the newly margined
                     * area. Apparently not: VMS TPU falls over
                     * if so.
                     *
                     * Well, actually it should for
                     * Origin mode - RDB
                     */
                    term->curs.y = (term->dec_om ?
                                    term->marg_t : 0);
                    seen_disp_event(term);
                }
            }
            break;
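          /*
           * SGR parameters can be combined in a single sequence, e.g.
           * ESC [ 1 ; 31 m selects a bold red foreground, and
           * ESC [ 0 m (or just ESC [ m) restores the default
           * attributes.
           */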
          case 'm':       /* SGR: set graphics rendition */
            /*
             * A VT100 without the AVO only had one
             * attribute, either underline or reverse
             * video depending on the cursor type; this
             * was selected by CSI 7m.
             *
             * case 2:
             *  This is sometimes DIM, eg on the GIGI and
             *  Linux.
             * case 8:
             *  This is sometimes INVIS on various ANSIs.
             * case 21:
             *  This, like 22, disables BOLD, DIM and INVIS.
             *
             * The ANSI colours appear on any terminal
             * that has colour (obviously) but the
             * interaction between sgr0 and the colours
             * varies, and is usually related to the
             * background colour erase item. The
             * interaction between colour attributes and
             * the mono ones is also very implementation
             * dependent.
             *
             * The 39 and 49 attributes are likely to be
             * unimplemented.
             */
            for (int i = 0; i < term->esc_nargs; i++)
                switch (def(term->esc_args[i], 0)) {
                  case 0:       /* restore defaults */
                    term->curr_attr = term->default_attr;
                    term->curr_truecolour =
                        term->basic_erase_char.truecolour;
                    break;
                  case 1:       /* enable bold */
                    compatibility(VT100AVO);
                    term->curr_attr |= ATTR_BOLD;
                    break;
                  case 2:       /* enable dim */
                    compatibility(OTHER);
                    term->curr_attr |= ATTR_DIM;
                    break;
                  case 21:      /* (enable double underline) */
                    compatibility(OTHER);
                  case 4:       /* enable underline */
                    compatibility(VT100AVO);
                    term->curr_attr |= ATTR_UNDER;
                    break;
                  case 5:       /* enable blink */
                    compatibility(VT100AVO);
                    term->curr_attr |= ATTR_BLINK;
                    break;
                  case 6:       /* SCO light bkgrd */
                    compatibility(SCOANSI);
                    term->blink_is_real = false;
                    term->curr_attr |= ATTR_BLINK;
                    term_schedule_tblink(term);
                    break;
                  case 7:       /* enable reverse video */
                    term->curr_attr |= ATTR_REVERSE;
                    break;
                  case 9:       /* enable strikethrough */
                    term->curr_attr |= ATTR_STRIKE;
                    break;
                  case 10:      /* SCO acs off */
                    compatibility(SCOANSI);
                    if (term->no_remote_charset) break;
                    term->sco_acs = 0; break;
                  case 11:      /* SCO acs on */
                    compatibility(SCOANSI);
                    if (term->no_remote_charset) break;
                    term->sco_acs = 1; break;
                  case 12:      /* SCO acs on, |0x80 */
                    compatibility(SCOANSI);
                    if (term->no_remote_charset) break;
                    term->sco_acs = 2; break;
                  case 22:      /* disable bold and dim */
                    compatibility2(OTHER, VT220);
                    term->curr_attr &= ~(ATTR_BOLD | ATTR_DIM);
                    break;
                  case 24:      /* disable underline */
                    compatibility2(OTHER, VT220);
                    term->curr_attr &= ~ATTR_UNDER;
                    break;
                  case 25:      /* disable blink */
                    compatibility2(OTHER, VT220);
                    term->curr_attr &= ~ATTR_BLINK;
                    break;
                  case 27:      /* disable reverse video */
                    compatibility2(OTHER, VT220);
                    term->curr_attr &= ~ATTR_REVERSE;
                    break;
                  case 29:      /* disable strikethrough */
                    term->curr_attr &= ~ATTR_STRIKE;
                    break;
                  case 30:
                  case 31:
                  case 32:
                  case 33:
                  case 34:
                  case 35:
                  case 36:
                  case 37:
                    /* foreground */
                    term->curr_truecolour.fg.enabled = false;
                    term->curr_attr &= ~ATTR_FGMASK;
                    term->curr_attr |=
                        (term->esc_args[i] - 30)<<ATTR_FGSHIFT;
                    break;
                  case 90:
                  case 91:
                  case 92:
                  case 93:
                  case 94:
                  case 95:
                  case 96:
                  case 97:
                    /* aixterm-style bright foreground */
                    term->curr_truecolour.fg.enabled = false;
                    term->curr_attr &= ~ATTR_FGMASK;
                    term->curr_attr |=
                        ((term->esc_args[i] - 90 + 8)
                         << ATTR_FGSHIFT);
                    break;
                  case 39:      /* default-foreground */
                    term->curr_truecolour.fg.enabled = false;
                    term->curr_attr &= ~ATTR_FGMASK;
                    term->curr_attr |= ATTR_DEFFG;
                    break;
                  case 40:
                  case 41:
                  case 42:
                  case 43:
                  case 44:
                  case 45:
                  case 46:
                  case 47:
                    /* background */
                    term->curr_truecolour.bg.enabled = false;
                    term->curr_attr &= ~ATTR_BGMASK;
                    term->curr_attr |=
                        (term->esc_args[i] - 40)<<ATTR_BGSHIFT;
                    break;
                  case 100:
                  case 101:
                  case 102:
                  case 103:
                  case 104:
                  case 105:
                  case 106:
                  case 107:
                    /* aixterm-style bright background */
                    term->curr_truecolour.bg.enabled = false;
                    term->curr_attr &= ~ATTR_BGMASK;
                    term->curr_attr |=
                        ((term->esc_args[i] - 100 + 8)
                         << ATTR_BGSHIFT);
                    break;
                  case 49:      /* default-background */
                    term->curr_truecolour.bg.enabled = false;
                    term->curr_attr &= ~ATTR_BGMASK;
                    term->curr_attr |= ATTR_DEFBG;
                    break;
                    /*
                     * 256-colour and true-colour
                     * sequences. A 256-colour
                     * foreground is selected by a
                     * sequence of 3 arguments in the
                     * form 38;5;n, where n is in the
                     * range 0-255. A true-colour RGB
                     * triple is selected by 5 args of
                     * the form 38;2;r;g;b. Replacing
                     * the initial 38 with 48 in both
                     * cases selects the same colour
                     * as the background.
                     */
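                    /*
                     * For example, ESC [ 38 ; 5 ; 196 m picks entry 196
                     * of the 256-colour palette as the foreground, and
                     * ESC [ 48 ; 2 ; 0 ; 128 ; 255 m sets a direct
                     * RGB(0,128,255) background.
                     */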
                  case 38:
                    if (i+2 < term->esc_nargs &&
                        term->esc_args[i+1] == 5) {
                        term->curr_attr &= ~ATTR_FGMASK;
                        term->curr_attr |=
                            ((term->esc_args[i+2] & 0xFF)
                             << ATTR_FGSHIFT);
                        term->curr_truecolour.fg =
                            optionalrgb_none;
                        i += 2;
                    }
                    if (i + 4 < term->esc_nargs &&
                        term->esc_args[i + 1] == 2) {
                        parse_optionalrgb(
                            &term->curr_truecolour.fg,
                            term->esc_args + (i+2));
                        i += 4;
                    }
                    break;
                  case 48:
                    if (i+2 < term->esc_nargs &&
                        term->esc_args[i+1] == 5) {
                        term->curr_attr &= ~ATTR_BGMASK;
                        term->curr_attr |=
                            ((term->esc_args[i+2] & 0xFF)
                             << ATTR_BGSHIFT);
                        term->curr_truecolour.bg =
                            optionalrgb_none;
                        i += 2;
                    }
                    if (i + 4 < term->esc_nargs &&
                        term->esc_args[i+1] == 2) {
                        parse_optionalrgb(
                            &term->curr_truecolour.bg,
                            term->esc_args + (i+2));
                        i += 4;
                    }
                    break;
                }
            set_erase_char(term);
            break;
          case 's':       /* save cursor */
            save_cursor(term, true);
            break;
          case 'u':       /* restore cursor */
            save_cursor(term, false);
            break;
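          /*
           * CSI t covers both DECSLPP and the xterm window-manipulation
           * controls, e.g. ESC [ 2 t iconifies the window, ESC [ 1 t
           * restores it, ESC [ 3 ; x ; y t moves it, and ESC [ 5 t /
           * ESC [ 6 t raise and lower it.
           */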
|
|
|
|
case 't': /* DECSLPP: set page size - ie window height */
|
|
|
|
/*
|
|
|
|
* VT340/VT420 sequence DECSLPP, DEC only allows values
|
|
|
|
* 24/25/36/48/72/144 other emulators (eg dtterm) use
|
|
|
|
* illegal values (eg first arg 1..9) for window changing
|
|
|
|
* and reports.
|
|
|
|
*/
|
|
|
|
if (term->esc_nargs <= 1
|
|
|
|
&& (term->esc_args[0] < 1 ||
|
|
|
|
term->esc_args[0] >= 24)) {
|
|
|
|
compatibility(VT340TEXT);
|
2021-12-13 18:49:45 +00:00
|
|
|
if (!term->no_remote_resize)
|
|
|
|
term_request_resize(term, term->cols, 24);
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
|
|
|
} else if (term->esc_nargs >= 1 &&
|
|
|
|
term->esc_args[0] >= 1 &&
|
|
|
|
term->esc_args[0] < 24) {
|
|
|
|
compatibility(OTHER);
|
|
|
|
|
|
|
|
switch (term->esc_args[0]) {
|
2021-02-07 19:59:21 +00:00
|
|
|
int len;
|
2019-09-08 19:29:00 +00:00
|
|
|
char buf[80];
|
2015-05-15 10:15:42 +00:00
|
|
|
const char *p;
|
2019-09-08 19:29:00 +00:00
|
|
|
case 1:
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
term->win_minimise_pending = true;
|
|
|
|
term->win_minimise_enable = false;
|
|
|
|
term_schedule_update(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
term->win_minimise_pending = true;
|
|
|
|
term->win_minimise_enable = true;
|
|
|
|
term_schedule_update(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
if (term->esc_nargs >= 3) {
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
if (!term->no_remote_resize) {
|
|
|
|
term->win_move_pending = true;
|
|
|
|
term->win_move_pending_x =
|
|
|
|
def(term->esc_args[1], 0);
|
|
|
|
term->win_move_pending_y =
|
|
|
|
def(term->esc_args[2], 0);
|
|
|
|
term_schedule_update(term);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
/* We should resize the window to a given
|
|
|
|
* size in pixels here, but currently our
|
|
|
|
* resizing code isn't healthy enough to
|
|
|
|
* manage it. */
|
|
|
|
break;
|
|
|
|
case 5:
|
|
|
|
/* move to top */
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_zorder_pending = true;
|
|
|
|
term->win_zorder_top = true;
|
|
|
|
term_schedule_update(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
/* move to bottom */
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_zorder_pending = true;
|
|
|
|
term->win_zorder_top = false;
|
|
|
|
term_schedule_update(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 7:
|
2021-02-07 19:59:21 +00:00
|
|
|
term->win_refresh_pending = true;
|
|
|
|
term_schedule_update(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 8:
|
2021-02-07 19:59:21 +00:00
|
|
|
if (term->esc_nargs >= 3 &&
|
|
|
|
!term->no_remote_resize) {
|
2021-12-13 18:49:45 +00:00
|
|
|
term_request_resize(
|
|
|
|
term,
|
2021-02-07 19:59:21 +00:00
|
|
|
def(term->esc_args[2],
|
2021-12-13 18:49:45 +00:00
|
|
|
term->conf_width),
|
2021-02-07 19:59:21 +00:00
|
|
|
def(term->esc_args[1],
|
2021-12-13 18:49:45 +00:00
|
|
|
term->conf_height));
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 9:
|
2021-02-07 19:59:21 +00:00
|
|
|
if (term->esc_nargs >= 2) {
|
|
|
|
term->win_maximise_pending = true;
|
|
|
|
term->win_maximise_enable =
|
|
|
|
term->esc_args[1];
|
|
|
|
term_schedule_update(term);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 11:
|
|
|
|
if (term->ldisc)
|
2021-02-07 19:59:20 +00:00
|
|
|
ldisc_send(term->ldisc, term->minimised ?
|
2019-09-08 19:29:00 +00:00
|
|
|
"\033[2t" : "\033[1t", 4,
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; now that those variables are bool and the value assigned is
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
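A minimal before-and-after sketch of the conversion described above
(illustrative only; 'flag', 'a', 'b', 'c' and some_condition() are
hypothetical names, not identifiers from this codebase):

    #include <stdbool.h>

    static bool some_condition(void) { return true; }  /* placeholder */

    static void example(void)
    {
        /* Before boolification this would have been:
         *     int flag = 0;  ...  flag = 1;                          */
        bool flag = false;
        if (some_condition())
            flag = true;

        /* Chained 'a = b = c = TRUE' is now split into separate
         * statements, so that assigning an assignment's result to a
         * bool doesn't provoke gcc's "suggest parentheses around
         * assignment used as truth value" warning. */
        bool a, b, c;
        c = flag;
        b = c;
        a = b;
        (void)a;
    }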
2018-11-02 19:23:19 +00:00
|
|
|
false);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 13:
|
|
|
|
if (term->ldisc) {
|
|
|
|
len = sprintf(buf, "\033[3;%u;%ut",
|
2021-02-07 19:59:21 +00:00
|
|
|
term->winpos_x,
|
|
|
|
term->winpos_y);
|
2019-09-08 19:29:00 +00:00
|
|
|
ldisc_send(term->ldisc, buf, len, false);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 14:
|
|
|
|
if (term->ldisc) {
|
2021-02-07 19:59:21 +00:00
|
|
|
len = sprintf(buf, "\033[4;%u;%ut",
|
|
|
|
term->winpixsize_y,
|
|
|
|
term->winpixsize_x);
|
2019-09-08 19:29:00 +00:00
|
|
|
ldisc_send(term->ldisc, buf, len, false);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 18:
|
|
|
|
if (term->ldisc) {
|
|
|
|
len = sprintf(buf, "\033[8;%d;%dt",
|
|
|
|
term->rows, term->cols);
|
|
|
|
ldisc_send(term->ldisc, buf, len, false);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 19:
|
|
|
|
/*
|
|
|
|
* Hmmm. Strictly speaking we
|
|
|
|
* should return `the size of the
|
|
|
|
* screen in characters', but
|
|
|
|
* that's not easy: (a) window
|
|
|
|
* furniture being what it is it's
|
|
|
|
* hard to compute, and (b) in
|
|
|
|
* resize-font mode maximising the
|
|
|
|
* window wouldn't change the
|
|
|
|
* number of characters. *shrug*. I
|
|
|
|
* think we'll ignore it for the
|
|
|
|
* moment and see if anyone
|
|
|
|
* complains, and then ask them
|
|
|
|
* what they would like it to do.
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
case 20:
|
|
|
|
if (term->ldisc &&
|
|
|
|
term->remote_qtitle_action != TITLE_NONE) {
|
|
|
|
if (term->remote_qtitle_action == TITLE_REAL)
|
2021-02-07 19:59:20 +00:00
|
|
|
p = term->icon_title;
|
2019-09-08 19:29:00 +00:00
|
|
|
else
|
|
|
|
p = EMPTY_WINDOW_TITLE;
|
|
|
|
len = strlen(p);
|
|
|
|
ldisc_send(term->ldisc, "\033]L", 3,
|
2018-11-02 19:23:19 +00:00
|
|
|
false);
|
2020-06-14 09:12:59 +00:00
|
|
|
ldisc_send(term->ldisc, p, len, false);
|
2019-09-08 19:29:00 +00:00
|
|
|
ldisc_send(term->ldisc, "\033\\", 2,
|
2018-11-02 19:23:19 +00:00
|
|
|
false);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 21:
|
|
|
|
if (term->ldisc &&
|
|
|
|
term->remote_qtitle_action != TITLE_NONE) {
|
|
|
|
if (term->remote_qtitle_action == TITLE_REAL)
|
2021-02-07 19:59:20 +00:00
|
|
|
p = term->window_title;
|
2019-09-08 19:29:00 +00:00
|
|
|
else
|
|
|
|
p = EMPTY_WINDOW_TITLE;
|
|
|
|
len = strlen(p);
|
|
|
|
ldisc_send(term->ldisc, "\033]l", 3,
|
2018-11-02 19:23:19 +00:00
|
|
|
false);
|
2020-06-14 09:12:59 +00:00
|
|
|
ldisc_send(term->ldisc, p, len, false);
|
2019-09-08 19:29:00 +00:00
|
|
|
ldisc_send(term->ldisc, "\033\\", 2,
|
2018-11-02 19:23:19 +00:00
|
|
|
false);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'S': /* SU: Scroll up */
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
scroll(term, term->marg_t, term->marg_b,
|
|
|
|
def(term->esc_args[0], 1), true);
|
|
|
|
term->wrapnext = false;
|
|
|
|
break;
|
|
|
|
case 'T': /* SD: Scroll down */
|
|
|
|
CLAMP(term->esc_args[0], term->rows);
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
scroll(term, term->marg_t, term->marg_b,
|
|
|
|
-def(term->esc_args[0], 1), true);
|
|
|
|
term->wrapnext = false;
|
|
|
|
break;
|
|
|
|
case ANSI('|', '*'): /* DECSNLS */
|
|
|
|
/*
|
|
|
|
* Set number of lines on screen
|
|
|
|
* VT420 uses VGA like hardware and can
|
|
|
|
* support any size in reasonable range
|
|
|
|
* (24..49 AIUI) with no default specified.
|
|
|
|
*/
|
|
|
|
compatibility(VT420);
|
|
|
|
if (term->esc_nargs == 1 && term->esc_args[0] > 0) {
|
2021-12-13 18:49:45 +00:00
|
|
|
if (!term->no_remote_resize)
|
|
|
|
term_request_resize(
|
|
|
|
term,
|
|
|
|
term->cols,
|
|
|
|
def(term->esc_args[0], term->conf_height));
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('|', '$'): /* DECSCPP */
|
|
|
|
/*
|
|
|
|
* Set number of columns per page
|
|
|
|
* Docs imply range is only 80 or 132, but
|
|
|
|
* I'll allow any.
|
|
|
|
*/
|
|
|
|
compatibility(VT340TEXT);
|
|
|
|
if (term->esc_nargs <= 1) {
|
2021-12-13 18:49:45 +00:00
|
|
|
if (!term->no_remote_resize)
|
|
|
|
term_request_resize(
|
|
|
|
term,
|
|
|
|
def(term->esc_args[0], term->conf_width),
|
|
|
|
term->rows);
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
|
|
|
}
|
|
|
|
break;
|
Formatting change to braces around one case of a switch.
Sometimes, within a switch statement, you want to declare local
variables specific to the handler for one particular case. Until now
I've mostly been writing this in the form
switch (discriminant) {
case SIMPLE:
do stuff;
break;
case COMPLICATED:
{
declare variables;
do stuff;
}
break;
}
which is ugly because the two pieces of essentially similar code
appear at different indent levels, and also inconvenient because you
have less horizontal space available to write the complicated case
handler in - particularly undesirable because _complicated_ case
handlers are the ones most likely to need all the space they can get!
After encountering a rather nicer idiom in the LLVM source code, and
after a bit of hackery this morning figuring out how to persuade
Emacs's auto-indent to do what I wanted with it, I've decided to move
to an idiom in which the open brace comes right after the case
statement, and the code within it is indented the same as it would
have been without the brace. Then the whole case handler (including
the break) lives inside those braces, and you get something that looks
more like this:
switch (discriminant) {
case SIMPLE:
do stuff;
break;
case COMPLICATED: {
declare variables;
do stuff;
break;
}
}
This commit is a big-bang change that reformats all the complicated
case handlers I could find into the new layout. This is particularly
nice in the Pageant main function, in which almost _every_ case
handler had a bundle of variables and was long and complicated. (In
fact that's what motivated me to get round to this.) Some of the
innermost parts of the terminal escape-sequence handling are also
breathing a bit easier now the horizontal pressure on them is
relieved.
(Also, in a few cases, I was able to remove the extra braces
completely, because the only variable local to the case handler was a
loop variable which our new C99 policy allows me to move into the
initialiser clause of its for statement.)
Viewed with whitespace ignored, this is not too disruptive a change.
Downstream patches that conflict with it may need to be reapplied
using --ignore-whitespace or similar.
2020-02-16 07:49:52 +00:00
|
|
|
case 'X': { /* ECH: write N spaces w/o moving cursor */
|
2019-09-08 19:29:00 +00:00
|
|
|
/* XXX VTTEST says this is vt220, vt510 manual
|
|
|
|
* says vt100 */
|
|
|
|
compatibility(ANSIMIN);
|
|
|
|
CLAMP(term->esc_args[0], term->cols);
|
2020-02-16 07:49:52 +00:00
|
|
|
int n = def(term->esc_args[0], 1);
|
|
|
|
pos cursplus;
|
|
|
|
int p = term->curs.x;
|
|
|
|
termline *cline = scrlineptr(term->curs.y);
|
|
|
|
|
|
|
|
check_trust_status(term, cline);
|
|
|
|
if (n > term->cols - term->curs.x)
|
|
|
|
n = term->cols - term->curs.x;
|
|
|
|
cursplus = term->curs;
|
|
|
|
cursplus.x += n;
|
|
|
|
check_boundary(term, term->curs.x, term->curs.y);
|
|
|
|
check_boundary(term, term->curs.x+n, term->curs.y);
|
|
|
|
check_selection(term, term->curs, cursplus);
|
|
|
|
while (n--)
|
|
|
|
copy_termchar(cline, p++,
|
|
|
|
&term->erase_char);
|
|
|
|
seen_disp_event(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
case 'x': /* DECREQTPARM: report terminal characteristics */
|
|
|
|
compatibility(VT100);
|
|
|
|
if (term->ldisc) {
|
|
|
|
char buf[32];
|
|
|
|
int i = def(term->esc_args[0], 0);
|
|
|
|
if (i == 0 || i == 1) {
|
|
|
|
strcpy(buf, "\033[2;1;1;112;112;1;0x");
|
|
|
|
buf[2] += i;
|
|
|
|
ldisc_send(term->ldisc, buf, 20, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
case 'Z': { /* CBT */
|
2019-09-08 19:29:00 +00:00
|
|
|
compatibility(OTHER);
|
|
|
|
CLAMP(term->esc_args[0], term->cols);
|
2020-02-16 07:49:52 +00:00
|
|
|
int i = def(term->esc_args[0], 1);
|
|
|
|
pos old_curs = term->curs;
|
|
|
|
|
2022-12-28 15:32:24 +00:00
|
|
|
for (; i > 0 && term->curs.x > 0; i--) {
|
2021-02-07 19:59:20 +00:00
|
|
|
do {
|
|
|
|
term->curs.x--;
|
|
|
|
} while (term->curs.x > 0 &&
|
|
|
|
!term->tabs[term->curs.x]);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2020-02-16 07:49:52 +00:00
|
|
|
check_selection(term, old_curs, term->curs);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
case ANSI('c', '='): /* Hide or Show Cursor */
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
switch (term->esc_args[0]) {
|
|
|
|
case 0: /* hide cursor */
|
|
|
|
term->cursor_on = false;
|
|
|
|
break;
|
|
|
|
case 1: /* restore cursor */
|
|
|
|
term->big_cursor = false;
|
|
|
|
term->cursor_on = true;
|
|
|
|
break;
|
|
|
|
case 2: /* block cursor */
|
|
|
|
term->big_cursor = true;
|
|
|
|
term->cursor_on = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('C', '='):
|
|
|
|
/*
|
|
|
|
* set cursor start on scanline esc_args[0] and
|
|
|
|
* end on scanline esc_args[1]. If you set
|
|
|
|
* the bottom scan line to a value less than
|
|
|
|
* the top scan line, the cursor will disappear.
|
|
|
|
*/
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
if (term->esc_nargs >= 2) {
|
|
|
|
if (term->esc_args[0] > term->esc_args[1])
|
|
|
|
term->cursor_on = false;
|
|
|
|
else
|
|
|
|
term->cursor_on = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('D', '='):
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
term->blink_is_real = false;
|
|
|
|
term_schedule_tblink(term);
|
|
|
|
if (term->esc_args[0]>=1)
|
|
|
|
term->curr_attr |= ATTR_BLINK;
|
|
|
|
else
|
|
|
|
term->curr_attr &= ~ATTR_BLINK;
|
|
|
|
break;
|
|
|
|
case ANSI('E', '='):
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
term->blink_is_real = (term->esc_args[0] >= 1);
|
|
|
|
term_schedule_tblink(term);
|
|
|
|
break;
|
|
|
|
case ANSI('F', '='): /* set normal foreground */
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
if (term->esc_args[0] < 16) {
|
|
|
|
long colour =
|
|
|
|
(sco2ansicolour[term->esc_args[0] & 0x7] |
|
|
|
|
(term->esc_args[0] & 0x8)) <<
|
|
|
|
ATTR_FGSHIFT;
|
|
|
|
term->curr_attr &= ~ATTR_FGMASK;
|
|
|
|
term->curr_attr |= colour;
|
2017-10-08 12:47:39 +00:00
|
|
|
term->curr_truecolour.fg = optionalrgb_none;
|
2019-09-08 19:29:00 +00:00
|
|
|
term->default_attr &= ~ATTR_FGMASK;
|
|
|
|
term->default_attr |= colour;
|
|
|
|
set_erase_char(term);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('G', '='): /* set normal background */
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
if (term->esc_args[0] < 16) {
|
|
|
|
long colour =
|
|
|
|
(sco2ansicolour[term->esc_args[0] & 0x7] |
|
|
|
|
(term->esc_args[0] & 0x8)) <<
|
|
|
|
ATTR_BGSHIFT;
|
|
|
|
term->curr_attr &= ~ATTR_BGMASK;
|
|
|
|
term->curr_attr |= colour;
|
2017-10-08 12:47:39 +00:00
|
|
|
term->curr_truecolour.bg = optionalrgb_none;
|
2019-09-08 19:29:00 +00:00
|
|
|
term->default_attr &= ~ATTR_BGMASK;
|
|
|
|
term->default_attr |= colour;
|
|
|
|
set_erase_char(term);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ANSI('L', '='):
|
|
|
|
compatibility(SCOANSI);
|
|
|
|
term->use_bce = (term->esc_args[0] <= 0);
|
|
|
|
set_erase_char(term);
|
|
|
|
break;
|
|
|
|
case ANSI('p', '"'): /* DECSCL: set compat level */
|
|
|
|
/*
|
|
|
|
* Allow the host to make this emulator a
|
|
|
|
* 'perfect' VT102. This first appeared in
|
|
|
|
* the VT220, but we do need to get back to
|
|
|
|
* PuTTY mode so I won't check it.
|
|
|
|
*
|
|
|
|
* The args 40..42,50 are a PuTTY extension.
|
|
|
|
* The 2nd arg, 8bit vs 7bit is not checked.
|
|
|
|
*
|
|
|
|
* Setting VT102 mode should also change
|
|
|
|
* the Fkeys to generate PF* codes as a
|
|
|
|
* real VT102 has no Fkeys. The VT220 does
|
|
|
|
* this, F11..F13 become ESC,BS,LF other
|
|
|
|
* Fkeys send nothing.
|
|
|
|
*
|
|
|
|
* Note ESC c will NOT change this!
|
|
|
|
*/
|
|
|
|
|
|
|
|
switch (term->esc_args[0]) {
|
|
|
|
case 61:
|
|
|
|
term->compatibility_level &= ~TM_VTXXX;
|
|
|
|
term->compatibility_level |= TM_VT102;
|
|
|
|
break;
|
|
|
|
case 62:
|
|
|
|
term->compatibility_level &= ~TM_VTXXX;
|
|
|
|
term->compatibility_level |= TM_VT220;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
if (term->esc_args[0] > 60 &&
|
|
|
|
term->esc_args[0] < 70)
|
|
|
|
term->compatibility_level |= TM_VTXXX;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 40:
|
|
|
|
term->compatibility_level &= TM_VTXXX;
|
|
|
|
break;
|
|
|
|
case 41:
|
|
|
|
term->compatibility_level = TM_PUTTY;
|
|
|
|
break;
|
|
|
|
case 42:
|
|
|
|
term->compatibility_level = TM_SCOANSI;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ARG_DEFAULT:
|
|
|
|
term->compatibility_level = TM_PUTTY;
|
|
|
|
break;
|
|
|
|
case 50:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Change the response to CSI c */
|
|
|
|
if (term->esc_args[0] == 50) {
|
|
|
|
int i;
|
|
|
|
char lbuf[64];
|
|
|
|
strcpy(term->id_string, "\033[?");
|
|
|
|
for (i = 1; i < term->esc_nargs; i++) {
|
|
|
|
if (i != 1)
|
|
|
|
strcat(term->id_string, ";");
|
|
|
|
sprintf(lbuf, "%u", term->esc_args[i]);
|
|
|
|
strcat(term->id_string, lbuf);
|
|
|
|
}
|
|
|
|
strcat(term->id_string, "c");
|
|
|
|
}
|
2001-05-06 14:35:20 +00:00
|
|
|
#if 0
|
2019-09-08 19:29:00 +00:00
|
|
|
/* Is this a good idea ?
|
|
|
|
* Well we should do a soft reset at this point ...
|
|
|
|
*/
|
|
|
|
if (!has_compat(VT420) && has_compat(VT100)) {
|
2021-12-13 18:49:45 +00:00
|
|
|
if (!term->no_remote_resize)
|
|
|
|
term_request_resize(term,
|
|
|
|
term->reset_132 ? 132 : 80,
|
|
|
|
24);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2001-05-06 14:35:20 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
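As a throwaway illustration of the DECSCL handling above (demonstration code, not part of the emulator), a host selects a compatibility level with CSI Ps " p; 61 and 62 are the standard VT100/VT220 selectors, and 40..42 and 50 are the PuTTY extensions the comment describes:

#include <stdio.h>

/* Demonstration only: emit a few DECSCL sequences at the terminal. */
int main(void)
{
    printf("\033[61\"p");   /* behave as a VT100/VT102-level terminal */
    printf("\033[62\"p");   /* behave as a VT220 */
    printf("\033[41\"p");   /* PuTTY extension: full PuTTY behaviour */
    fflush(stdout);
    return 0;
}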
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SEEN_OSC:
|
|
|
|
term->osc_w = false;
|
|
|
|
switch (c) {
|
|
|
|
case 'P': /* Linux palette sequence */
|
|
|
|
term->termstate = SEEN_OSC_P;
|
|
|
|
term->osc_strlen = 0;
|
|
|
|
break;
|
|
|
|
case 'R': /* Linux palette reset */
|
Centralise palette setup into terminal.c.
Now terminal.c makes nearly all the decisions about what the colour
palette should actually contain: it does the job of reading the
GUI-configurable colours out of Conf, and also the job of making up
the rest of the xterm-256 palette. The only exception is that TermWin
can provide a method to override some of the default colours, which on
Windows is used to implement the 'Use system colours' config option.
This saves code overall, partly because the front ends don't have to
be able to send palette data back to the Terminal any more (the
Terminal keeps the master copy and can answer palette-query escape
sequences from its own knowledge), and also because now there's only
one copy of the xterm-256 palette setup code (previously gtkwin.c and
window.c each had their own version of it).
In this rewrite, I've also introduced a multi-layered storage system
for the palette data in Terminal. One layer contains the palette
information derived from Conf; the next contains platform overrides
(currently just Windows's 'Use system colours'); the last one contains
overrides set by escape sequences in the middle of the session. The
topmost two layers can each _conditionally_ override the ones below.
As a result, if a server-side application manually resets (say) the
default fg and bg colours in mid-session to something that works well
in a particular application, those changes won't be wiped out by a
change in the Windows system colours or the Conf, which they would
have been before. Instead, changes in Conf or the system colours alter
the lower layers of the structure, but then when palette_rebuild is
called, the upper layer continues to override them, until a palette
reset (ESC]R) or terminal reset (e.g. ESC c) removes those upper-layer
changes. This seems like a more consistent strategy, in that the same
set of configuration settings will produce the same end result
regardless of what order they were applied in.
The palette-related methods in TermWin have had a total rework.
palette_get and palette_reset are both gone; palette_set can now set a
contiguous range of colours in one go; and the new
palette_get_overrides replaces window.c's old systopalette().
2021-02-07 19:59:21 +00:00
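As a rough sketch of the conditional layering described above (struct and function names here are invented for illustration; the real fields live in Terminal and terminal.h), a rebuild pass lets each higher layer override the one below it only where it has an entry marked present:

#include <stdbool.h>

typedef struct { unsigned char r, g, b; } rgb_sketch;

typedef struct {
    bool present[256];       /* does this layer override this entry? */
    rgb_sketch values[256];
} subpalette_sketch;

/* Compose the displayed palette from layers[0] (lowest priority,
 * e.g. Conf-derived and assumed fully populated) up to
 * layers[nlayers-1] (e.g. escape-sequence overrides), roughly in
 * the spirit of palette_rebuild(). */
static void rebuild_palette_sketch(const subpalette_sketch *layers,
                                   int nlayers, rgb_sketch out[256])
{
    for (int i = 0; i < 256; i++) {
        out[i] = layers[0].values[i];          /* base layer always set */
        for (int l = 1; l < nlayers; l++)
            if (layers[l].present[i])
                out[i] = layers[l].values[i];  /* conditional override */
    }
}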
|
|
|
palette_reset(term, false);
|
2019-09-08 19:29:00 +00:00
|
|
|
term_invalidate(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
|
|
|
case 'W': /* word-set */
|
|
|
|
term->termstate = SEEN_OSC_W;
|
|
|
|
term->osc_w = true;
|
|
|
|
break;
|
|
|
|
case '0':
|
|
|
|
case '1':
|
|
|
|
case '2':
|
|
|
|
case '3':
|
|
|
|
case '4':
|
|
|
|
case '5':
|
|
|
|
case '6':
|
|
|
|
case '7':
|
|
|
|
case '8':
|
|
|
|
case '9':
|
|
|
|
if (term->esc_args[term->esc_nargs-1] <= UINT_MAX / 10 &&
|
|
|
|
term->esc_args[term->esc_nargs-1] * 10 <= UINT_MAX - c - '0')
|
|
|
|
term->esc_args[term->esc_nargs-1] =
|
2017-10-05 19:43:02 +00:00
|
|
|
10 * term->esc_args[term->esc_nargs-1] + c - '0';
|
2019-09-08 19:29:00 +00:00
|
|
|
else
|
|
|
|
term->esc_args[term->esc_nargs-1] = UINT_MAX;
|
|
|
|
break;
|
2022-05-11 19:07:31 +00:00
|
|
|
case 0x9C:
|
|
|
|
/* Terminate even though we aren't in OSC_STRING yet */
|
|
|
|
do_osc(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
|
|
|
case 0xC2:
|
|
|
|
if (in_utf(term)) {
|
|
|
|
/* Or be prepared for the UTF-8 version of that */
|
|
|
|
term->termstate = OSC_MAYBE_ST_UTF8;
|
|
|
|
}
|
|
|
|
break;
|
2017-10-05 19:43:02 +00:00
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* _Most_ other characters here terminate the
|
|
|
|
* immediate parsing of the OSC sequence and go
|
|
|
|
* into OSC_STRING state, but we deal with a
|
|
|
|
* couple of exceptions first.
|
|
|
|
*/
|
|
|
|
if (c == 'L' && term->esc_args[0] == 2) {
|
|
|
|
/*
|
|
|
|
* Grotty hack to support xterm and DECterm title
|
|
|
|
* sequences concurrently.
|
|
|
|
*/
|
|
|
|
term->esc_args[0] = 1;
|
|
|
|
} else if (c == ';' && term->esc_nargs == 1 &&
|
|
|
|
term->esc_args[0] == 4) {
|
|
|
|
/*
|
|
|
|
* xterm's OSC 4 sequence to query the current
|
|
|
|
* RGB value of a colour takes a second
|
|
|
|
* numeric argument which is easiest to parse
|
|
|
|
* using the existing system rather than in
|
|
|
|
* do_osc.
|
|
|
|
*/
|
|
|
|
term->esc_args[term->esc_nargs++] = 0;
|
|
|
|
} else {
|
|
|
|
term->termstate = OSC_STRING;
|
|
|
|
term->osc_strlen = 0;
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
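As an example of the OSC 4 colour query mentioned in the comment above (demonstration code, not part of the terminal), the host sends the palette index followed by '?', and the terminal replies with that entry's current RGB value:

#include <stdio.h>

/* Ask the terminal for the current RGB value of palette entry 1,
 * using xterm's OSC 4 query form, terminated here with BEL. */
int main(void)
{
    printf("\033]4;1;?\007");
    fflush(stdout);
    return 0;
}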
|
|
|
|
case OSC_STRING:
|
|
|
|
/*
|
Charset-aware handling of C1 ST in OSC sequences.
When the terminal is in UTF-8 mode, we accumulate UTF-8 text normally
in the OSC string buffer - but the byte 0x9C is interpreted as the C1
control character String Terminator, which terminates the OSC
sequence. That's not really what you want in UTF-8 mode, because 0x9C
is also a perfectly normal UTF-8 continuation character. For example,
you'd expect this to set the window title to "FÜNF":
echo -ne '\033]0;FÜNF\007'
but in fact, by the sheer chance that Ü is encoded with an 0x9C byte,
you get a window title consisting of "F" followed by an illegal-
encoding marker, and the OSC sequence is terminated abruptly so that
the trailing 'NF' is printed normally to the terminal and then the BEL
generates a beep.
Now, in UTF-8 mode, we only support the C1 control for ST if it
appears in the form of the proper UTF-8 encoding of U+009C. So that
example now 'works', at least in the sense that the terminal considers
the OSC sequence to terminate where the sender expected it to
terminate.
Another case where we interpret 0x9C inappropriately as ST is if the
terminal is in a single-byte character set in which that character is
a printing one. In CP437, for example, you can't set a window title
containing a pound sign, because its encoding is 0x9C.
This commit by itself doesn't make those window titles _work_, in the
sense of coming out looking right. They just mean that the OSC
sequence is not terminated at the wrong place. The actual title
rendering will be fixed in the next commit.
2021-10-16 10:47:06 +00:00
|
|
|
* OSC sequences can be terminated or aborted in
|
|
|
|
* various ways.
|
2019-09-08 19:29:00 +00:00
|
|
|
*
|
2021-10-16 10:47:06 +00:00
|
|
|
* The official way to terminate an OSC, per written
|
|
|
|
* standards, is the String Terminator, ST. That can
|
|
|
|
* appear in a 7-bit two-character form ESC \, or as
|
|
|
|
* an 8-bit C1 control 0x9C.
|
|
|
|
*
|
|
|
|
* We only accept 0x9C in circumstances where it
|
|
|
|
* doesn't interfere with our main character set
|
|
|
|
* processing: so in ISO 8859-1, for example, the byte
|
|
|
|
* 0x9C is interpreted as ST, but in CP437 it's
|
|
|
|
* interpreted as an ordinary printing character (as
|
|
|
|
* it happens, the pound sign), because you might
|
|
|
|
* perfectly well want to put it in the window title
|
|
|
|
* like any other printing character.
|
|
|
|
*
|
|
|
|
* In particular, in UTF-8 mode, 0x9C is a perfectly
|
|
|
|
* valid continuation byte for an ordinary printing
|
|
|
|
* character, so we don't accept the C1 control form
|
|
|
|
* of ST unless it appears as a full UTF-8 character
|
|
|
|
* in its own right, i.e. bytes 0xC2 0x9C.
|
|
|
|
*
|
|
|
|
* BEL is also treated as a clean termination of OSC,
|
|
|
|
* which I believe was a behaviour introduced by
|
|
|
|
* xterm.
|
|
|
|
*
|
|
|
|
* To prevent run-on storage of OSC data forever if
|
|
|
|
* emission of a control sequence is interrupted, we
|
|
|
|
* also treat various control characters as illegal,
|
|
|
|
* so that they abort the OSC without processing it
|
|
|
|
* and return to TOPLEVEL state. These are CR, LF, and
|
|
|
|
* any ESC that is *not* followed by \.
|
2019-09-08 19:29:00 +00:00
|
|
|
*/
|
2021-10-16 10:47:06 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (c == '\012' || c == '\015') {
|
2021-10-16 10:47:06 +00:00
|
|
|
/* CR or LF aborts */
|
2019-09-08 19:29:00 +00:00
|
|
|
term->termstate = TOPLEVEL;
|
2021-10-16 10:47:06 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c == '\033') {
|
|
|
|
/* ESC goes into a state where we wait to see if
|
|
|
|
* the next character is \ */
|
|
|
|
term->termstate = OSC_MAYBE_ST;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c == '\007' || (c == 0x9C && !in_utf(term) &&
|
|
|
|
term->ucsdata->unitab_ctrl[c] != 0xFF)) {
|
|
|
|
/* BEL, or the C1 ST appearing as a one-byte
|
|
|
|
* encoding, cleanly terminates the OSC right here */
|
2019-09-08 19:29:00 +00:00
|
|
|
do_osc(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
2021-10-16 10:47:06 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c == 0xC2 && in_utf(term)) {
|
|
|
|
/* 0xC2 is the UTF-8 character that might
|
|
|
|
* introduce the encoding of C1 ST */
|
|
|
|
term->termstate = OSC_MAYBE_ST_UTF8;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Anything else gets added to the string */
|
|
|
|
if (term->osc_strlen < OSC_STR_MAX)
|
|
|
|
term->osc_string[term->osc_strlen++] = (char)c;
|
|
|
|
break;
|
|
|
|
case OSC_MAYBE_ST_UTF8:
|
|
|
|
/* In UTF-8 mode, we've seen C2, so are we now seeing
|
|
|
|
* 9C? */
|
|
|
|
if (c == 0x9C) {
|
|
|
|
/* Yes, so cleanly terminate the OSC */
|
|
|
|
do_osc(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* No, so append the pending C2 byte to the OSC string
|
|
|
|
* followed by the current character, and go back to
|
|
|
|
* OSC string accumulation */
|
|
|
|
if (term->osc_strlen < OSC_STR_MAX)
|
|
|
|
term->osc_string[term->osc_strlen++] = 0xC2;
|
|
|
|
if (term->osc_strlen < OSC_STR_MAX)
|
2019-09-08 19:29:00 +00:00
|
|
|
term->osc_string[term->osc_strlen++] = (char)c;
|
2021-10-16 10:47:06 +00:00
|
|
|
term->termstate = OSC_STRING;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
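A free-standing sketch of the termination rule implemented above (the function and its interface are invented for illustration; the real logic lives in the OSC_STRING and OSC_MAYBE_ST_UTF8 states, and this simplification ignores the unitab_ctrl check for single-byte charsets):

#include <stdbool.h>

/* Feed one byte at a time; returns true when the OSC should be
 * cleanly terminated.  *pending_c2 must start out false and
 * persists between calls. */
static bool osc_terminator_sketch(unsigned char byte, bool utf8_mode,
                                  bool *pending_c2)
{
    if (*pending_c2) {
        *pending_c2 = false;
        if (byte == 0x9C)
            return true;            /* 0xC2 0x9C is U+009C, i.e. ST */
        /* otherwise 0xC2 began an ordinary character; fall through */
    }
    if (byte == 0x07)
        return true;                /* BEL, xterm-style termination */
    if (byte == 0x9C && !utf8_mode)
        return true;                /* bare C1 ST outside UTF-8 mode */
    if (byte == 0xC2 && utf8_mode)
        *pending_c2 = true;         /* might introduce U+009C */
    return false;
}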
|
2020-02-16 07:49:52 +00:00
|
|
|
case SEEN_OSC_P: {
|
|
|
|
int max = (term->osc_strlen == 0 ? 21 : 15);
|
|
|
|
int val;
|
|
|
|
if ((int)c >= '0' && (int)c <= '9')
|
|
|
|
val = c - '0';
|
|
|
|
else if ((int)c >= 'A' && (int)c <= 'A' + max - 10)
|
|
|
|
val = c - 'A' + 10;
|
|
|
|
else if ((int)c >= 'a' && (int)c <= 'a' + max - 10)
|
|
|
|
val = c - 'a' + 10;
|
|
|
|
else {
|
2021-02-07 19:59:20 +00:00
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
|
|
|
|
term->osc_string[term->osc_strlen++] = val;
|
|
|
|
if (term->osc_strlen >= 7) {
|
2021-02-07 19:59:20 +00:00
|
|
|
unsigned oscp_index = term->osc_string[0];
|
|
|
|
assert(oscp_index < OSCP_NCOLOURS);
|
|
|
|
unsigned osc4_index =
|
|
|
|
colour_indices_oscp_to_osc4[oscp_index];
|
|
|
|
|
2021-02-07 19:59:21 +00:00
|
|
|
rgb *value = &term->subpalettes[SUBPAL_SESSION].values[
|
|
|
|
osc4_index];
|
|
|
|
value->r = term->osc_string[1] * 16 + term->osc_string[2];
|
|
|
|
value->g = term->osc_string[3] * 16 + term->osc_string[4];
|
|
|
|
value->b = term->osc_string[5] * 16 + term->osc_string[6];
|
|
|
|
term->subpalettes[SUBPAL_SESSION].present[
|
|
|
|
osc4_index] = true;
|
|
|
|
|
|
|
|
palette_rebuild(term);
|
|
|
|
|
2021-02-07 19:59:20 +00:00
|
|
|
term->termstate = TOPLEVEL;
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
2020-02-16 07:49:52 +00:00
|
|
|
}
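For illustration (not part of the emulator), the Linux palette sequence parsed above consists of ESC ] P, one hex digit of palette index, and six hex digits of RGB; the values below are arbitrary:

#include <stdio.h>

/* Demonstration: repaint Linux-console palette entry 1 as orange. */
int main(void)
{
    printf("\033]P1ff8000");
    fflush(stdout);
    return 0;
}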
|
2019-09-08 19:29:00 +00:00
|
|
|
case SEEN_OSC_W:
|
|
|
|
switch (c) {
|
|
|
|
case '0':
|
|
|
|
case '1':
|
|
|
|
case '2':
|
|
|
|
case '3':
|
|
|
|
case '4':
|
|
|
|
case '5':
|
|
|
|
case '6':
|
|
|
|
case '7':
|
|
|
|
case '8':
|
|
|
|
case '9':
|
|
|
|
if (term->esc_args[0] <= UINT_MAX / 10 &&
|
|
|
|
term->esc_args[0] * 10 <= UINT_MAX - c - '0')
|
|
|
|
term->esc_args[0] = 10 * term->esc_args[0] + c - '0';
|
|
|
|
else
|
|
|
|
term->esc_args[0] = UINT_MAX;
|
|
|
|
break;
|
2022-05-11 19:07:31 +00:00
|
|
|
case 0x9C:
|
|
|
|
/* Terminate even though we aren't in OSC_STRING yet */
|
|
|
|
do_osc(term);
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
break;
|
|
|
|
case 0xC2:
|
|
|
|
if (in_utf(term)) {
|
|
|
|
/* Or be prepared for the UTF-8 version of that */
|
|
|
|
term->termstate = OSC_MAYBE_ST_UTF8;
|
|
|
|
}
|
|
|
|
break;
|
2019-09-08 19:29:00 +00:00
|
|
|
default:
|
|
|
|
term->termstate = OSC_STRING;
|
|
|
|
term->osc_strlen = 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VT52_ESC:
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
switch (c) {
|
|
|
|
case 'A':
|
|
|
|
move(term, term->curs.x, term->curs.y - 1, 1);
|
|
|
|
break;
|
|
|
|
case 'B':
|
|
|
|
move(term, term->curs.x, term->curs.y + 1, 1);
|
|
|
|
break;
|
|
|
|
case 'C':
|
|
|
|
move(term, term->curs.x + 1, term->curs.y, 1);
|
|
|
|
break;
|
|
|
|
case 'D':
|
|
|
|
move(term, term->curs.x - 1, term->curs.y, 1);
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* From the VT100 Manual
|
|
|
|
* NOTE: The special graphics characters in the VT100
|
|
|
|
* are different from those in the VT52
|
|
|
|
*
|
|
|
|
* From VT102 manual:
|
|
|
|
* 137 _ Blank - Same
|
|
|
|
* 140 ` Reserved - Humm.
|
|
|
|
* 141 a Solid rectangle - Similar
|
|
|
|
* 142 b 1/ - Top half of fraction for the
|
|
|
|
* 143 c 3/ - subscript numbers below.
|
|
|
|
* 144 d 5/
|
|
|
|
* 145 e 7/
|
|
|
|
* 146 f Degrees - Same
|
|
|
|
* 147 g Plus or minus - Same
|
|
|
|
* 150 h Right arrow
|
|
|
|
* 151 i Ellipsis (dots)
|
|
|
|
* 152 j Divide by
|
|
|
|
* 153 k Down arrow
|
|
|
|
* 154 l Bar at scan 0
|
|
|
|
* 155 m Bar at scan 1
|
|
|
|
* 156 n Bar at scan 2
|
|
|
|
* 157 o Bar at scan 3 - Similar
|
|
|
|
* 160 p Bar at scan 4 - Similar
|
|
|
|
* 161 q Bar at scan 5 - Similar
|
|
|
|
* 162 r Bar at scan 6 - Same
|
|
|
|
* 163 s Bar at scan 7 - Similar
|
|
|
|
* 164 t Subscript 0
|
|
|
|
* 165 u Subscript 1
|
|
|
|
* 166 v Subscript 2
|
|
|
|
* 167 w Subscript 3
|
|
|
|
* 170 x Subscript 4
|
|
|
|
* 171 y Subscript 5
|
|
|
|
* 172 z Subscript 6
|
|
|
|
* 173 { Subscript 7
|
|
|
|
* 174 | Subscript 8
|
|
|
|
* 175 } Subscript 9
|
|
|
|
* 176 ~ Paragraph
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
case 'F':
|
|
|
|
term->cset_attr[term->cset = 0] = CSET_LINEDRW;
|
|
|
|
break;
|
|
|
|
case 'G':
|
|
|
|
term->cset_attr[term->cset = 0] = CSET_ASCII;
|
|
|
|
break;
|
|
|
|
case 'H':
|
|
|
|
move(term, 0, 0, 0);
|
|
|
|
break;
|
|
|
|
case 'I':
|
Further reorganisations of seen_disp_event().
Shortly after the previous commit I spotted another definitely missing
display update: if you send the byte 0x7F, aka 'destructive
backspace', then the display didn't update immediately.
That was two in a row, so I did an eyeball review of the whole
terminal state machine to the best of my ability. Found a couple more
borderline ones, but also, found that the entire VT52 sub-state-
machine had a blanket seen_disp_event which really _shouldn't_ have
been there, because half the VT52 sequences aren't actually display-
modifying updates.
To make this _slightly_ less error-prone, I've sunk a number of
seen_disp_update calls into subroutines that aren't the top-level
term_out(). For example, erase_lots(), scroll(), move() and
swap_screen() now all call seen_disp_update within themselves, so
their call sites don't all have to remember to.
There are probably further bugs after this upheaval, but I think it's
moving in generally the right direction.
2023-09-25 19:43:55 +00:00
|
|
|
if (term->curs.y == 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
scroll(term, 0, term->rows - 1, -1, true);
|
2023-09-25 19:43:55 +00:00
|
|
|
} else if (term->curs.y > 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curs.y--;
|
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
term->wrapnext = false;
|
|
|
|
break;
|
|
|
|
case 'J':
|
|
|
|
erase_lots(term, false, false, true);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 'K':
|
|
|
|
erase_lots(term, true, false, true);
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#if 0
|
2019-09-08 19:29:00 +00:00
|
|
|
case 'V':
|
|
|
|
/* XXX Print cursor line */
|
|
|
|
break;
|
|
|
|
case 'W':
|
|
|
|
/* XXX Start controller mode */
|
|
|
|
break;
|
|
|
|
case 'X':
|
|
|
|
/* XXX Stop controller mode */
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
case 'Y':
|
|
|
|
term->termstate = VT52_Y1;
|
|
|
|
break;
|
|
|
|
case 'Z':
|
|
|
|
if (term->ldisc)
|
|
|
|
ldisc_send(term->ldisc, "\033/Z", 3, false);
|
|
|
|
break;
|
|
|
|
case '=':
|
|
|
|
term->app_keypad_keys = true;
|
|
|
|
break;
|
|
|
|
case '>':
|
|
|
|
term->app_keypad_keys = false;
|
|
|
|
break;
|
|
|
|
case '<':
|
|
|
|
/* XXX This should switch to VT100 mode not current or default
|
|
|
|
* VT mode. But this will only have effect in a VT220+
|
|
|
|
* emulation.
|
|
|
|
*/
|
|
|
|
term->vt52_mode = false;
|
|
|
|
term->blink_is_real = term->blinktext;
|
|
|
|
term_schedule_tblink(term);
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#if 0
|
2019-09-08 19:29:00 +00:00
|
|
|
case '^':
|
|
|
|
/* XXX Enter auto print mode */
|
|
|
|
break;
|
|
|
|
case '_':
|
|
|
|
/* XXX Exit auto print mode */
|
|
|
|
break;
|
|
|
|
case ']':
|
|
|
|
/* XXX Print screen */
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef VT52_PLUS
|
2019-09-08 19:29:00 +00:00
|
|
|
case 'E':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
move(term, 0, 0, 0);
|
|
|
|
erase_lots(term, false, false, true);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 'L':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
if (term->curs.y <= term->marg_b)
|
|
|
|
scroll(term, term->curs.y, term->marg_b, -1, false);
|
|
|
|
break;
|
|
|
|
case 'M':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
if (term->curs.y <= term->marg_b)
|
|
|
|
scroll(term, term->curs.y, term->marg_b, 1, true);
|
|
|
|
break;
|
|
|
|
case 'b':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->termstate = VT52_FG;
|
|
|
|
break;
|
|
|
|
case 'c':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->termstate = VT52_BG;
|
|
|
|
break;
|
|
|
|
case 'd':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
erase_lots(term, false, true, false);
|
2014-01-25 18:38:38 +00:00
|
|
|
if (term->scroll_on_disp)
|
|
|
|
term->disptop = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 'e':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->cursor_on = true;
|
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case 'f':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->cursor_on = false;
|
2023-09-25 19:43:55 +00:00
|
|
|
seen_disp_event(term);
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
/* case 'j': Save cursor position - broken on ST */
|
|
|
|
/* case 'k': Restore cursor position */
|
|
|
|
case 'l':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
erase_lots(term, true, true, true);
|
|
|
|
term->curs.x = 0;
|
|
|
|
term->wrapnext = false;
|
|
|
|
break;
|
|
|
|
case 'o':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
erase_lots(term, true, true, false);
|
|
|
|
break;
|
|
|
|
case 'p':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->curr_attr |= ATTR_REVERSE;
|
|
|
|
break;
|
|
|
|
case 'q':
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->curr_attr &= ~ATTR_REVERSE;
|
|
|
|
break;
|
|
|
|
case 'v': /* wrap Autowrap on - Wyse style */
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->wrap = true;
|
|
|
|
break;
|
|
|
|
case 'w': /* Autowrap off */
|
|
|
|
/* compatibility(ATARI) */
|
|
|
|
term->wrap = false;
|
Don't set term->wrapnext when not in auto-wrapping mode.
A user sent a transcript from a curses-based tool 'ncmpc', which
carefully disables terminal autowrap when printing a character in the
bottom right corner of the display, and then turns it back on again.
After that, it expects that sending the backspace character really
moves the cursor back a space, instead of clearing the wrapnext flag.
But in PuTTY, we set the wrapnext flag even if we're not in wrapping
mode - it just doesn't _do_ anything when the next character is sent.
But it remains set, and still affects backspace. So the display is
corrupted by this change of expectation.
(Specifically, ncmpc is printing a time display [m:ss] in the very
bottom right, so it disables wrap in order to print the final ']'.
Then the next thing it needs to do is to update the low-order digit of
the seconds field, so it sends \b as the simplest way to get to that
character. The effect on the display is that the updated seconds digit
appears where the ] was, instead of overwriting the old seconds digit.)
This is a tradeoff in desirable behaviours. The point of having a
backspace operation cancel the wrapnext flag and not actually move the
cursor is to preserve the invariant that sending 'x', backspace, 'y'
causes the y to overprint the x, even if that happens near the end of
the terminal's line length. In non-wrapping mode that invariant was
bound to break _eventually_, but with this change, it breaks one
character earlier than before. However, I think that's less bad than
breaking the expectations of curses-based full-screen applications,
especially since the _main_ need for that invariant arises from naïve
applications that don't want to have to think about the terminal width
at all - and those applications generally run in _wrapping_ mode,
where it's possible to continue the invariant across multiple lines in
any case.
2024-08-10 09:38:02 +00:00
|
|
|
term->wrapnext = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 'R':
|
|
|
|
/* compatibility(OTHER) */
|
|
|
|
term->vt52_bold = false;
|
|
|
|
term->curr_attr = ATTR_DEFAULT;
|
2017-10-08 12:47:39 +00:00
|
|
|
term->curr_truecolour.fg = optionalrgb_none;
|
|
|
|
term->curr_truecolour.bg = optionalrgb_none;
|
2019-09-08 19:29:00 +00:00
|
|
|
set_erase_char(term);
|
|
|
|
break;
|
|
|
|
case 'S':
|
|
|
|
/* compatibility(VI50) */
|
|
|
|
term->curr_attr |= ATTR_UNDER;
|
|
|
|
break;
|
|
|
|
case 'W':
|
|
|
|
/* compatibility(VI50) */
|
|
|
|
term->curr_attr &= ~ATTR_UNDER;
|
|
|
|
break;
|
|
|
|
case 'U':
|
|
|
|
/* compatibility(VI50) */
|
|
|
|
term->vt52_bold = true;
|
|
|
|
term->curr_attr |= ATTR_BOLD;
|
|
|
|
break;
|
|
|
|
case 'T':
|
|
|
|
/* compatibility(VI50) */
|
|
|
|
term->vt52_bold = false;
|
|
|
|
term->curr_attr &= ~ATTR_BOLD;
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VT52_Y1:
|
|
|
|
term->termstate = VT52_Y2;
|
|
|
|
move(term, term->curs.x, c - ' ', 0);
|
|
|
|
break;
|
|
|
|
case VT52_Y2:
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
move(term, c - ' ', term->curs.y, 0);
|
|
|
|
break;
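For reference, the VT52 direct cursor addressing handled in the two states above offsets each coordinate by 32 (the space character); a throwaway demonstration with zero-based row and column:

#include <stdio.h>

/* Move a VT52-mode terminal's cursor to row 5, column 10:
 * ESC Y, then row + 32, then column + 32. */
int main(void)
{
    int row = 5, col = 10;
    printf("\033Y%c%c", ' ' + row, ' ' + col);
    fflush(stdout);
    return 0;
}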
|
2001-05-10 08:34:20 +00:00
|
|
|
|
|
|
|
#ifdef VT52_PLUS
|
2019-09-08 19:29:00 +00:00
|
|
|
case VT52_FG:
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
term->curr_attr &= ~ATTR_FGMASK;
|
|
|
|
term->curr_attr &= ~ATTR_BOLD;
|
|
|
|
term->curr_attr |= (c & 0xF) << ATTR_FGSHIFT;
|
|
|
|
set_erase_char(term);
|
|
|
|
break;
|
|
|
|
case VT52_BG:
|
|
|
|
term->termstate = TOPLEVEL;
|
|
|
|
term->curr_attr &= ~ATTR_BGMASK;
|
|
|
|
term->curr_attr &= ~ATTR_BLINK;
|
|
|
|
term->curr_attr |= (c & 0xF) << ATTR_BGSHIFT;
|
|
|
|
set_erase_char(term);
|
|
|
|
break;
|
2001-05-10 08:34:20 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
default: break; /* placate gcc warning about enum use */
|
|
|
|
}
|
|
|
|
if (term->selstate != NO_SELECTION) {
|
|
|
|
pos cursplus = term->curs;
|
|
|
|
incpos(cursplus);
|
|
|
|
check_selection(term, term->curs, cursplus);
|
|
|
|
}
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
2002-03-09 17:59:15 +00:00
|
|
|
|
2021-12-18 15:07:41 +00:00
|
|
|
bufchain_consume(&term->inbuf, nchars_used);
|
|
|
|
|
Proper buffer management between terminal and backend.
The return value of term_data() is used as the return value from the
GUI-terminal versions of the Seat output method, which means backends
will take it to be the amount of standard-output data currently
buffered, and exert back-pressure on the remote peer if it gets too
big (e.g. by ceasing to extend the window in that particular SSH-2
channel).
Historically, as a comment in term_data() explained, we always just
returned 0 from that function, on the basis that we were processing
all the terminal data through our terminal emulation code immediately,
and never retained any of it in the buffer at all. If the terminal
emulation code were to start running slowly, then it would slow down
the _whole_ PuTTY system, due to single-threadedness, and
back-pressure of a sort would be exerted on the remote by it simply
failing to get round to reading from the network socket. But by the
time we got back to the top level of term_data(), we'd have finished
reading all the data we had, so it was still appropriate to return 0.
That comment is still correct if you're thinking about the limiting
factor on terminal data processing being the CPU usage in term_out().
But now that's no longer the whole story, because sometimes we leave
data in term->inbuf without having processed it: during drag-selects
in the terminal window, and (just introduced) while waiting for the
response to a pending window resize request. For both those reasons,
we _don't_ always have a buffer size of zero when we return from
term_data().
So now that hole in our buffer size management is filled in:
term_data() returns the true size of the remaining unprocessed
terminal output, so that back-pressure will be exerted if the terminal
is currently not consuming it. And when processing resumes and we
start to clear our backlog, we call backend_unthrottle to let the
backend know it can relax the back-pressure if necessary.
2021-12-12 10:57:23 +00:00
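In sketch form, the flow-control contract described above looks something like this (the names and the threshold are invented for illustration; the real interface is the Seat output path and backend_unthrottle):

#include <stdbool.h>
#include <stddef.h>

enum { BACKLOG_LIMIT = 32768 };   /* illustrative threshold */

static size_t backlog;            /* unprocessed terminal output, in bytes */
static bool throttled;

/* Called when output arrives from the backend: queue it and report
 * the buffered amount, so the backend can stop extending the channel
 * window if the emulator is falling behind. */
static size_t sketch_term_data(size_t len)
{
    backlog += len;
    if (backlog > BACKLOG_LIMIT)
        throttled = true;
    return backlog;
}

/* Called as the emulator consumes its backlog: once it shrinks, tell
 * the backend it may relax the back-pressure again. */
static void sketch_term_consumed(size_t n)
{
    backlog -= n;
    if (throttled && backlog < BACKLOG_LIMIT)
        throttled = false;        /* i.e. the backend_unthrottle() call */
}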
|
|
|
if (!called_from_term_data)
|
|
|
|
win_unthrottle(term->win, bufchain_size(&term->inbuf));
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
term_print_flush(term);
|
2015-10-06 10:02:52 +00:00
|
|
|
if (term->logflush && term->logctx)
|
2019-09-08 19:29:00 +00:00
|
|
|
logflush(term->logctx);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
Suspend terminal output while a window resize is pending.
This is the payoff from the last few commits of refactoring. It fixes
the following race-condition bug in terminal application redraw:
* server sends a window-resizing escape sequence
* terminal requests a window resize from the front end
* server sends further escape sequences to perform a redraw of some
full-screen application, which assume that the window resize has
occurred and the window is already its new size
* terminal processes all those sequences in the context of the old
window size, while the front end is still thinking
* window resize completes in the front end and term_size() tells the
terminal it now has its new size, but it's too late, the screen
redraw has made a total mess.
(Perhaps the server might even send its window resize + followup
redraw all in one SSH packet, so that it's all queued in term->inbuf
in one go.)
As far as I can see, handling of this case has been broken more or
less forever in the GTK frontend (where window resizes are inherently
asynchronous due to the way X11 works, and we've never done anything
to compensate for that). On Windows, where window size is changed via
SetWindowPos which is synchronous, it used to work, but broke in
commit d74308e90e3813a (i.e. between 0.74 and 0.75), which made all
the ancillary window updates run on the same delayed-action timer as
ordinary text display.
So, it's time to fix it, and I think now I should be able to fix it in
GTK as well as on Windows.
Now, as soon as we've set the term->win_resize_pending flag (in
response to a resize escape sequence), the next return to the top of
the main loop in term_out will terminate output processing early,
leaving any further terminal data still in the term->inbuf bufchain.
Once we get a term_size() callback from the front end telling us our
new size, we reset term->win_resize_pending, which unblocks output
processing again, and we also queue a toplevel callback to have
another try at term_out() so that it will be unblocked promptly.
To implement this I've changed term->win_resize_pending from a bool
into a three-state enumeration, so that we can tell the difference
between 'pending' in the sense of not yet having sent our resize
request to the frontend, and in the sense of waiting for the frontend
to reply. That way, a window resize from the GUI user at least won't
be mistaken for the response to our resize request if it arrives in
the former state. (It can still be mistaken for one in the latter
case, but if the user is resizing the window at the same time as the
server-side application is doing critically size-dependent redrawing,
I don't think there can be any reasonable expectation of nothing going
wrong.)
As mentioned in the previous commit, some failure modes under X11 (in
particular the window manager process getting wedged in some way) can
result in no response being received to a ConfigureWindow request. In
that situation, it seems to me that we really _shouldn't_ sit there
waiting forever - perhaps it's technically the WM's fault and not
ours, but what kind of X window are you most likely to want to use to
do emergency WM repair? A terminal window, of course, so it would be
exceptionally unhelpful to make any terminal window stop working
completely in this situation! Hence, there's a fallback timeout in
terminal.c, so that if we don't receive a response in _too_ long,
we'll assume one is not forthcoming, and resume processing terminal
data at the old window size. The fallback timeout is set to 5 seconds,
following existing practice in libXt (DEFAULT_WM_TIMEOUT).
2021-12-19 10:37:02 +00:00
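A minimal sketch of the three-state flag described above (enumerator and function names are illustrative, not the ones in terminal.h):

#include <stdbool.h>

typedef enum {
    WIN_RESIZE_NO,          /* no resize outstanding */
    WIN_RESIZE_NEED_SEND,   /* resize escape seen, request not yet sent */
    WIN_RESIZE_AWAIT_REPLY  /* request sent, waiting for term_size() */
} win_resize_state_sketch;

/* Checked at the top of the output loop: while a reply (or the ~5
 * second fallback timeout) is outstanding, leave further data in
 * term->inbuf so it is interpreted at the new window size. */
static bool output_blocked_sketch(win_resize_state_sketch state)
{
    return state == WIN_RESIZE_AWAIT_REPLY;
}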
|
|
|
/* Wrapper on term_out with the right prototype to be a toplevel callback */
|
|
|
|
void term_out_cb(void *ctx)
|
|
|
|
{
|
Proper buffer management between terminal and backend.
The return value of term_data() is used as the return value from the
GUI-terminal versions of the Seat output method, which means backends
will take it to be the amount of standard-output data currently
buffered, and exert back-pressure on the remote peer if it gets too
big (e.g. by ceasing to extend the window in that particular SSH-2
channel).
Historically, as a comment in term_data() explained, we always just
returned 0 from that function, on the basis that we were processing
all the terminal data through our terminal emulation code immediately,
and never retained any of it in the buffer at all. If the terminal
emulation code were to start running slowly, then it would slow down
the _whole_ PuTTY system, due to single-threadedness, and
back-pressure of a sort would be exerted on the remote by it simply
failing to get round to reading from the network socket. But by the
time we got back to the top level of term_data(), we'd have finished
reading all the data we had, so it was still appropriate to return 0.
That comment is still correct if you're thinking about the limiting
factor on terminal data processing being the CPU usage in term_out().
But now that's no longer the whole story, because sometimes we leave
data in term->inbuf without having processed it: during drag-selects
in the terminal window, and (just introduced) while waiting for the
response to a pending window resize request. For both those reasons,
we _don't_ always have a buffer size of zero when we return from
term_data().
So now that hole in our buffer size management is filled in:
term_data() returns the true size of the remaining unprocessed
terminal output, so that back-pressure will be exerted if the terminal
is currently not consuming it. And when processing resumes and we
start to clear our backlog, we call backend_unthrottle to let the
backend know it can relax the back-pressure if necessary.
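A minimal sketch of the resulting shape of term_data() and of the unthrottle call (bufchain_add, bufchain_size and backend_unthrottle are the existing helpers; scheduling of the actual processing is elided):

    size_t term_data(Terminal *term, const void *data, size_t len)
    {
        bufchain_add(&term->inbuf, data, len);
        /* (queueing of the callback that will run term_out() elided) */

        /* Report the true backlog so the backend can exert back-pressure. */
        return bufchain_size(&term->inbuf);
    }

    /* Later, once term_out() has drained some of that backlog: */
    if (term->backend)
        backend_unthrottle(term->backend, bufchain_size(&term->inbuf));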
2021-12-12 10:57:23 +00:00
|
|
|
term_out((Terminal *)ctx, false);
|
Suspend terminal output while a window resize is pending.
This is the payoff from the last few commits of refactoring. It fixes
the following race-condition bug in terminal application redraw:
* server sends a window-resizing escape sequence
* terminal requests a window resize from the front end
* server sends further escape sequences to perform a redraw of some
full-screen application, which assume that the window resize has
occurred and the window is already its new size
* terminal processes all those sequences in the context of the old
window size, while the front end is still thinking
* window resize completes in the front end and term_size() tells the
terminal it now has its new size, but it's too late, the screen
redraw has made a total mess.
(Perhaps the server might even send its window resize + followup
redraw all in one SSH packet, so that it's all queued in term->inbuf
in one go.)
As far as I can see, handling of this case has been broken more or
less forever in the GTK frontend (where window resizes are inherently
asynchronous due to the way X11 works, and we've never done anything
to compensate for that). On Windows, where window size is changed via
SetWindowPos which is synchronous, it used to work, but broke in
commit d74308e90e3813a (i.e. between 0.74 and 0.75), which made all
the ancillary window updates run on the same delayed-action timer as
ordinary text display.
2021-12-19 10:37:02 +00:00
|
|
|
}
|
|
|
|
|
2017-09-30 16:32:32 +00:00
|
|
|
/*
|
|
|
|
* Small subroutine to parse three consecutive escape-sequence
|
|
|
|
* arguments representing a true-colour RGB triple into an
|
|
|
|
* optionalrgb.
|
|
|
|
*/
|
|
|
|
static void parse_optionalrgb(optionalrgb *out, unsigned *values)
|
|
|
|
{
|
2018-10-29 19:50:29 +00:00
|
|
|
out->enabled = true;
|
2017-09-30 16:32:32 +00:00
|
|
|
out->r = values[0] < 256 ? values[0] : 0;
|
|
|
|
out->g = values[1] < 256 ? values[1] : 0;
|
|
|
|
out->b = values[2] < 256 ? values[2] : 0;
|
|
|
|
}
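For context, a hedged example of the kind of call site this serves, e.g. while handling an SGR 38;2;<r>;<g>;<b> subsequence (the argument array here is made up for illustration):

    unsigned args[3] = { 255, 128, 0 };   /* r, g, b as parsed from the sequence */
    optionalrgb fg;
    parse_optionalrgb(&fg, args);         /* fg.enabled = true, fg = (255,128,0) */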
|
|
|
|
|
2004-05-22 10:36:50 +00:00
|
|
|
/*
|
|
|
|
* To prevent having to run the reasonably tricky bidi algorithm
|
|
|
|
* too many times, we maintain a cache of the last lineful of data
|
|
|
|
* fed to the algorithm on each line of the display.
|
|
|
|
*/
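For reference while reading the code below, the cache entry has roughly this shape (a sketch reconstructed from how the fields are used here; the authoritative definition lives in terminal.h):

    struct bidi_cache_entry {
        int width;                 /* number of display columns cached */
        bool trusted;              /* trust state the line was cached with */
        struct termchar *chars;    /* cached line contents */
        int *forward, *backward;   /* logical<->physical position maps */
    };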
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
constant 1; but now that those variables are bool and the value assigned is
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
static bool term_bidi_cache_hit(Terminal *term, int line,
|
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
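A minimal sketch of the guard described above, assuming a per-line trusted flag and a current trust state held in the Terminal structure (field names such as 'cols' are assumptions; the real check_trust_status() may do more than this):

    static void check_trust_status_sketch(Terminal *term, termline *line)
    {
        if (line->trusted != term->trusted) {
            /* Trust state has changed since this line was last written:
             * blank the whole line so the server cannot overwrite
             * trusted output left on screen by PuTTY itself. */
            for (int i = 0; i < line->cols; i++)
                line->chars[i] = term->basic_erase_char;
            line->trusted = term->trusted;
        }
    }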
2019-03-10 14:39:28 +00:00
|
|
|
termchar *lbefore, int width, bool trusted)
|
2004-05-22 10:36:50 +00:00
|
|
|
{
|
2004-10-14 16:42:43 +00:00
|
|
|
int i;
|
|
|
|
|
2004-05-22 10:36:50 +00:00
|
|
|
if (!term->pre_bidi_cache)
|
2019-09-08 19:29:00 +00:00
|
|
|
return false; /* cache doesn't even exist yet! */
|
2004-05-22 10:36:50 +00:00
|
|
|
|
|
|
|
if (line >= term->bidi_cache_size)
|
2019-09-08 19:29:00 +00:00
|
|
|
return false; /* cache doesn't have this many lines */
|
2004-05-22 10:36:50 +00:00
|
|
|
|
2004-10-15 11:11:19 +00:00
|
|
|
if (!term->pre_bidi_cache[line].chars)
|
2019-09-08 19:29:00 +00:00
|
|
|
return false; /* cache doesn't contain _this_ line */
|
2004-05-22 10:36:50 +00:00
|
|
|
|
2004-10-15 11:11:19 +00:00
|
|
|
if (term->pre_bidi_cache[line].width != width)
|
2019-09-08 19:29:00 +00:00
|
|
|
return false; /* line is wrong width */
|
2004-10-15 11:11:19 +00:00
|
|
|
|
2019-03-10 14:39:28 +00:00
|
|
|
if (term->pre_bidi_cache[line].trusted != trusted)
|
2019-09-08 19:29:00 +00:00
|
|
|
return false; /* line has wrong trust state */
|
2019-03-10 14:39:28 +00:00
|
|
|
|
2004-10-14 16:42:43 +00:00
|
|
|
for (i = 0; i < width; i++)
|
2019-09-08 19:29:00 +00:00
|
|
|
if (!termchars_equal(term->pre_bidi_cache[line].chars+i, lbefore+i))
|
|
|
|
return false; /* line doesn't match cache */
|
2004-05-22 10:36:50 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
return true;               /* it matched */
|
2004-05-22 10:36:50 +00:00
|
|
|
}
|
|
|
|
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
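As a conceptual sketch of the data-structure change (the real termchar in terminal.h has since grown further fields, e.g. combining-character links and true colour, so this is illustration rather than the current definition):

    /* Before: one cell = one unsigned long (16-bit Unicode + 16 attr bits).
     * After, roughly: */
    typedef struct termchar_sketch {
        unsigned long chr;    /* full 32-bit character value, plus CSET_* flags */
        unsigned long attr;   /* 32 attribute bits, with room to grow */
    } termchar_sketch;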
2004-10-13 11:50:16 +00:00
|
|
|
static void term_bidi_cache_store(Terminal *term, int line, termchar *lbefore,
|
2019-09-08 19:29:00 +00:00
|
|
|
termchar *lafter, bidi_char *wcTo,
|
|
|
|
int width, int size, bool trusted)
|
2004-05-22 10:36:50 +00:00
|
|
|
{
|
New array-growing macros: sgrowarray and sgrowarrayn.
The idea of these is that they centralise the common idiom along the
lines of
if (logical_array_len >= physical_array_size) {
physical_array_size = logical_array_len * 5 / 4 + 256;
array = sresize(array, physical_array_size, ElementType);
}
which happens at a zillion call sites throughout this code base, with
different random choices of the geometric factor and additive
constant, sometimes forgetting them completely, and generally doing a
lot of repeated work.
The new macro sgrowarray(array,size,n) has the semantics: here are the
array pointer and its physical size for you to modify, now please
ensure that the nth element exists, so I can write into it. And
sgrowarrayn(array,size,n,m) is the same except that it ensures that
the array has size at least n+m (so sgrowarray is just the special
case where m=1).
Now that this is a single centralised implementation that will be used
everywhere, I've also gone to more effort in the implementation, with
careful overflow checks that would have been painful to put at all the
previous call sites.
This commit also switches over every use of sresize(), apart from a
few where I really didn't think it would gain anything. A consequence
of that is that a lot of array-size variables have to have their types
changed to size_t, because the macros require that (they address-take
the size to pass to the underlying function).
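A simplified sketch of the growth policy being centralised (the real implementation works via a helper taking a void pointer and an element size, and performs the careful overflow checks mentioned above, which are omitted here):

    #include <stdlib.h>

    void *growarray_sketch(void *array, size_t *size, size_t n, size_t eltsize)
    {
        if (n >= *size) {
            *size = n * 5 / 4 + 256;      /* geometric factor + additive constant */
            array = realloc(array, *size * eltsize);
        }
        return array;                     /* element n now safe to write */
    }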
2019-02-28 20:07:30 +00:00
|
|
|
size_t i, j;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2004-05-22 10:36:50 +00:00
|
|
|
if (!term->pre_bidi_cache || term->bidi_cache_size <= line) {
|
2019-02-28 20:07:30 +00:00
|
|
|
j = term->bidi_cache_size;
|
|
|
|
sgrowarray(term->pre_bidi_cache, term->bidi_cache_size, line);
|
2019-09-08 19:29:00 +00:00
|
|
|
term->post_bidi_cache = sresize(term->post_bidi_cache,
|
|
|
|
term->bidi_cache_size,
|
|
|
|
struct bidi_cache_entry);
|
|
|
|
while (j < term->bidi_cache_size) {
|
|
|
|
term->pre_bidi_cache[j].chars =
|
|
|
|
term->post_bidi_cache[j].chars = NULL;
|
|
|
|
term->pre_bidi_cache[j].width =
|
|
|
|
term->post_bidi_cache[j].width = -1;
|
|
|
|
term->pre_bidi_cache[j].trusted = false;
|
2019-03-10 14:39:28 +00:00
|
|
|
term->post_bidi_cache[j].trusted = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
term->pre_bidi_cache[j].forward =
|
|
|
|
term->post_bidi_cache[j].forward = NULL;
|
|
|
|
term->pre_bidi_cache[j].backward =
|
|
|
|
term->post_bidi_cache[j].backward = NULL;
|
|
|
|
j++;
|
|
|
|
}
|
2004-05-22 10:36:50 +00:00
|
|
|
}
|
|
|
|
|
2004-10-15 11:11:19 +00:00
|
|
|
sfree(term->pre_bidi_cache[line].chars);
|
|
|
|
sfree(term->post_bidi_cache[line].chars);
|
2004-11-30 13:39:58 +00:00
|
|
|
sfree(term->post_bidi_cache[line].forward);
|
|
|
|
sfree(term->post_bidi_cache[line].backward);
|
2004-05-22 10:36:50 +00:00
|
|
|
|
2004-10-15 11:11:19 +00:00
|
|
|
term->pre_bidi_cache[line].width = width;
|
2019-03-10 14:39:28 +00:00
|
|
|
term->pre_bidi_cache[line].trusted = trusted;
|
2004-12-01 09:25:20 +00:00
|
|
|
term->pre_bidi_cache[line].chars = snewn(size, termchar);
|
2004-10-15 11:11:19 +00:00
|
|
|
term->post_bidi_cache[line].width = width;
|
2019-03-10 14:39:28 +00:00
|
|
|
term->post_bidi_cache[line].trusted = trusted;
|
2004-12-01 09:25:20 +00:00
|
|
|
term->post_bidi_cache[line].chars = snewn(size, termchar);
|
2004-11-28 09:24:57 +00:00
|
|
|
term->post_bidi_cache[line].forward = snewn(width, int);
|
|
|
|
term->post_bidi_cache[line].backward = snewn(width, int);
|
2004-05-22 10:36:50 +00:00
|
|
|
|
2004-12-01 09:25:20 +00:00
|
|
|
memcpy(term->pre_bidi_cache[line].chars, lbefore, size * TSIZE);
|
|
|
|
memcpy(term->post_bidi_cache[line].chars, lafter, size * TSIZE);
|
2004-11-28 09:24:57 +00:00
|
|
|
memset(term->post_bidi_cache[line].forward, 0, width * sizeof(int));
|
|
|
|
memset(term->post_bidi_cache[line].backward, 0, width * sizeof(int));
|
|
|
|
|
2019-02-26 18:32:44 +00:00
|
|
|
for (i = j = 0; j < width; j += wcTo[i].nchars, i++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
int p = wcTo[i].index;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-03-10 14:39:28 +00:00
|
|
|
if (p != BIDI_CHAR_INDEX_NONE) {
|
|
|
|
assert(0 <= p && p < width);
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-03-10 14:39:28 +00:00
|
|
|
for (int x = 0; x < wcTo[i].nchars; x++) {
|
|
|
|
term->post_bidi_cache[line].backward[j+x] = p+x;
|
|
|
|
term->post_bidi_cache[line].forward[p+x] = j+x;
|
|
|
|
}
|
2019-02-26 18:32:44 +00:00
|
|
|
}
|
2004-11-28 09:24:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prepare the bidi information for a screen line. Returns the
|
|
|
|
* transformed list of termchars, or NULL if no transformation at
|
|
|
|
* all took place (because bidi is disabled). If return was
|
|
|
|
* non-NULL, auxiliary information such as the forward and reverse
|
|
|
|
* mappings of permutation position are available in
|
|
|
|
* term->post_bidi_cache[scr_y].*.
|
|
|
|
*/
|
|
|
|
static termchar *term_bidi_line(Terminal *term, struct termline *ldata,
|
2019-09-08 19:29:00 +00:00
|
|
|
int scr_y)
|
2004-11-28 09:24:57 +00:00
|
|
|
{
|
|
|
|
termchar *lchars;
|
|
|
|
int it;
|
|
|
|
|
|
|
|
/* Do Arabic shaping and bidi. */
|
2019-03-26 21:13:19 +00:00
|
|
|
if (!term->no_bidi || !term->no_arabicshaping ||
|
2019-03-10 14:39:28 +00:00
|
|
|
(ldata->trusted && term->cols > TRUST_SIGIL_WIDTH)) {
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (!term_bidi_cache_hit(term, scr_y, ldata->chars, term->cols,
|
2019-03-10 14:39:28 +00:00
|
|
|
ldata->trusted)) {
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->wcFromTo_size < term->cols) {
|
|
|
|
term->wcFromTo_size = term->cols;
|
|
|
|
term->wcFrom = sresize(term->wcFrom, term->wcFromTo_size,
|
|
|
|
bidi_char);
|
|
|
|
term->wcTo = sresize(term->wcTo, term->wcFromTo_size,
|
|
|
|
bidi_char);
|
|
|
|
}
|
|
|
|
|
2022-12-28 15:37:57 +00:00
|
|
|
for (it=0; it<term->cols ; it++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
unsigned long uc = (ldata->chars[it].chr);
|
|
|
|
|
|
|
|
switch (uc & CSET_MASK) {
|
|
|
|
case CSET_LINEDRW:
|
|
|
|
if (!term->rawcnp) {
|
|
|
|
uc = term->ucsdata->unitab_xterm[uc & 0xFF];
|
|
|
|
break;
|
|
|
|
}
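/* fall through */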
|
|
|
|
case CSET_ASCII:
|
|
|
|
uc = term->ucsdata->unitab_line[uc & 0xFF];
|
|
|
|
break;
|
|
|
|
case CSET_SCOACS:
|
|
|
|
uc = term->ucsdata->unitab_scoacs[uc&0xFF];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
switch (uc & CSET_MASK) {
|
|
|
|
case CSET_ACP:
|
|
|
|
uc = term->ucsdata->unitab_font[uc & 0xFF];
|
|
|
|
break;
|
|
|
|
case CSET_OEMCP:
|
|
|
|
uc = term->ucsdata->unitab_oemcp[uc & 0xFF];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
term->wcFrom[it].origwc = term->wcFrom[it].wc =
|
|
|
|
(unsigned int)uc;
|
|
|
|
term->wcFrom[it].index = it;
|
|
|
|
term->wcFrom[it].nchars = 1;
|
|
|
|
}
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-03-10 14:39:28 +00:00
|
|
|
if (ldata->trusted && term->cols > TRUST_SIGIL_WIDTH) {
|
|
|
|
memmove(
|
|
|
|
term->wcFrom + TRUST_SIGIL_WIDTH, term->wcFrom,
|
|
|
|
(term->cols - TRUST_SIGIL_WIDTH) * sizeof(*term->wcFrom));
|
|
|
|
for (it = 0; it < TRUST_SIGIL_WIDTH; it++) {
|
|
|
|
term->wcFrom[it].origwc = term->wcFrom[it].wc =
|
|
|
|
(it == 0 ? TRUST_SIGIL_CHAR :
|
|
|
|
it == 1 ? UCSWIDE : ' ');
|
|
|
|
term->wcFrom[it].index = BIDI_CHAR_INDEX_NONE;
|
|
|
|
term->wcFrom[it].nchars = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-25 20:51:17 +00:00
|
|
|
int nbc = 0;
|
|
|
|
for (it = 0; it < term->cols; it++) {
|
|
|
|
term->wcFrom[nbc] = term->wcFrom[it];
|
|
|
|
if (it+1 < term->cols && term->wcFrom[it+1].wc == UCSWIDE) {
|
|
|
|
term->wcFrom[nbc].nchars++;
|
|
|
|
it++;
|
|
|
|
}
|
|
|
|
nbc++;
|
|
|
|
}
|
|
|
|
|
2022-12-28 15:32:24 +00:00
|
|
|
if (!term->no_bidi)
|
2021-10-10 13:40:51 +00:00
|
|
|
do_bidi(term->bidi_ctx, term->wcFrom, nbc);
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2022-12-28 15:32:24 +00:00
|
|
|
if (!term->no_arabicshaping) {
|
2019-09-08 19:29:00 +00:00
|
|
|
do_shape(term->wcFrom, term->wcTo, nbc);
|
term_bidi_line: fix failure to initialise wcTo.
The bidi algorithm is called on the array term->wcFrom, modifying it
in place. Then the Arabic-shaping algorithm - which can't work in
place because it needs to check the original value of array entries
it's already modified - is called, copying term->wcFrom to term->wcTo
as a side effect. Then the cleanup code expects the final version of
the line to be in wcTo. So if shaping is turned off, we still need to
copy wcFrom into wcTo, even if we don't modify it en route.
Previously, that copy was done under an if statement whose condition
boils down to 'if bidi is enabled but shaping is not'. So if that code
was ever reached with _both_ bidi and shaping turned off, then nothing
at all would copy wcFrom into wcTo, and wcTo would be filled with
nonsense.
Before trust sigils were introduced, that was OK, because the whole
function body was skipped if both bidi and shaping were turned off.
But now trust-sigil handling lives in there too, so we can get into
that code with the previously disallowed combination of flags. If
you're lucky, this means that the assert(opos == term->cols) near the
bottom of the function fails, on the basis that opos is the sum of
nonsense values from wcTo; if you're unlucky I suppose you might
manage to get _plausible_ nonsense through to the screen.
Now fixed, by changing that central if statement into a much more
obvious one: if we're running do_shape, then that can copy wcFrom into
wcTo, and if and only if we're _not_, then we must copy it another
way. (And while I'm here, I've turned that other way from a manual for
loop into memcpy.)
2019-03-26 21:05:12 +00:00
|
|
|
} else {
|
|
|
|
/* If we're not calling do_shape, we must copy the
|
|
|
|
* data into wcTo anyway, unchanged */
|
|
|
|
memcpy(term->wcTo, term->wcFrom, nbc * sizeof(*term->wcTo));
|
|
|
|
}
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->ltemp_size < ldata->size) {
|
|
|
|
term->ltemp_size = ldata->size;
|
|
|
|
term->ltemp = sresize(term->ltemp, term->ltemp_size,
|
|
|
|
termchar);
|
|
|
|
}
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
memcpy(term->ltemp, ldata->chars, ldata->size * TSIZE);
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-02-25 20:51:17 +00:00
|
|
|
int opos = 0;
|
2019-09-08 19:29:00 +00:00
|
|
|
for (it=0; it<nbc; it++) {
|
2019-02-25 20:51:17 +00:00
|
|
|
int ipos = term->wcTo[it].index;
|
|
|
|
for (int j = 0; j < term->wcTo[it].nchars; j++) {
|
2019-03-10 14:39:28 +00:00
|
|
|
if (ipos != BIDI_CHAR_INDEX_NONE) {
|
|
|
|
term->ltemp[opos] = ldata->chars[ipos];
|
|
|
|
if (term->ltemp[opos].cc_next)
|
|
|
|
term->ltemp[opos].cc_next -= opos - ipos;
|
|
|
|
|
|
|
|
if (j > 0)
|
|
|
|
term->ltemp[opos].chr = UCSWIDE;
|
|
|
|
else if (term->wcTo[it].origwc != term->wcTo[it].wc)
|
|
|
|
term->ltemp[opos].chr = term->wcTo[it].wc;
|
|
|
|
} else {
|
|
|
|
term->ltemp[opos] = term->basic_erase_char;
|
|
|
|
term->ltemp[opos].chr =
|
|
|
|
j > 0 ? UCSWIDE : term->wcTo[it].origwc;
|
|
|
|
}
|
2019-02-25 20:51:17 +00:00
|
|
|
opos++;
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2019-02-25 20:51:17 +00:00
|
|
|
assert(opos == term->cols);
|
2019-09-08 19:29:00 +00:00
|
|
|
term_bidi_cache_store(term, scr_y, ldata->chars,
|
|
|
|
term->ltemp, term->wcTo,
|
2019-03-10 14:39:28 +00:00
|
|
|
term->cols, ldata->size, ldata->trusted);
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
lchars = term->ltemp;
|
|
|
|
} else {
|
|
|
|
lchars = term->post_bidi_cache[scr_y].chars;
|
|
|
|
}
|
2004-11-28 09:24:57 +00:00
|
|
|
} else {
|
2019-09-08 19:29:00 +00:00
|
|
|
lchars = NULL;
|
2004-11-28 09:24:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return lchars;
|
2004-05-22 10:36:50 +00:00
|
|
|
}
|
|
|
|
|
2019-02-26 07:12:13 +00:00
|
|
|
static void do_paint_draw(Terminal *term, termline *ldata, int x, int y,
|
|
|
|
wchar_t *ch, int ccount,
|
|
|
|
unsigned long attr, truecolour tc)
|
|
|
|
{
|
2019-03-10 14:39:28 +00:00
|
|
|
if (ch[0] == TRUST_SIGIL_CHAR) {
|
|
|
|
assert(ldata->trusted);
|
|
|
|
assert(ccount == 1);
|
|
|
|
assert(attr & ATTR_WIDE);
|
|
|
|
wchar_t tch[2];
|
|
|
|
tch[0] = tch[1] = L' ';
|
|
|
|
win_draw_text(term->win, x, y, tch, 2, term->basic_erase_char.attr,
|
|
|
|
ldata->lattr, term->basic_erase_char.truecolour);
|
|
|
|
win_draw_trust_sigil(term->win, x, y);
|
|
|
|
} else {
|
Support Unicode flag glyphs in terminal.c (works in GTK).
This is the only one of the newly added cases in test/utf8.txt which I
can (try to) fix unilaterally just by changing PuTTY's display code,
because it doesn't change the number of character cells occupied by
the text, only the appearance of those cells.
In this commit I make the necessary changes in terminal.c, which makes
flags start working in GTK PuTTY and pterm, but not on Windows.
The system of encoding flags in Unicode is that there's a space of 26
regional-indicator letter code points (U+1F1E6 to U+1F1FF inclusive)
corresponding to the unaccented Latin alphabet, and an adjacent pair
of those letters represents the flag associated with that two-letter
code (usually a nation, although at least one non-nation pair exists,
namely EU).
There are two plausible ways we could handle this in terminal.c:
(a) leave the regional indicators as they are in the internal data
model, so that each RI letter occupies its own character cell,
and at display time have do_paint() spot adjacent pairs of them
and send each pair to the frontend as a combined glyph.
(b) combine the pairs _in_ the internal data model, by
special-casing them in term_display_graphic_char().
This choice makes a semantic difference. What if a flag is displayed
in the terminal and something overprints one of its two character
cells? With option (a), overprinting one cell of an RI pair with a
different RI letter would change it into a different flag; with
option (b), flags behave like any other wide character, in that
overprinting one of the two cells blanks the other as a side effect.
I think we need (a), because not all terminal redraw systems
(curses-style libraries) will understand the Unicode flag glyph system
at all. So if a full-screen application genuinely wants to do a screen
redraw in which a flag changes to a different flag while keeping one
of its constituent letters the same (say, swapping between BA and CA,
or between AC and AD), then the redraw library might very well
implement that screen update by redrawing only the changed letter, and
we need not to corrupt the flag.
All of this is now implemented in terminal.c. The effect is that pairs
of RI characters are passed to the TermWin draw_text() method as if
they were a wide character with a combining mark: that is, you get a
two-character (or four-surrogate) string, with TATTR_COMBINING
indicating that it represents a single glyph, and ATTR_WIDE indicating
that that glyph occupies two character cells rather than one.
In GTK, that's enough to make flag display Just Work. But on
Windows (at least the Win10 machine I have to test on), that doesn't
make flags start working all by itself. But then, the rest of the new
emoji tests also look a bit confused on Windows too. Help would be
welcome from someone who knows how Windows emoji display is supposed
to work!
2024-05-06 10:07:12 +00:00
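A minimal sketch of the pairing rule just described, using illustrative
helper names (the macro the surrounding code actually uses is
IS_REGIONAL_INDICATOR_LETTER; the range is the U+1F1E6..U+1F1FF block from
the message above):

    /* Sketch: two adjacent regional-indicator letters form one flag glyph,
     * drawn as a single two-cell glyph. Assumes <stdbool.h>. */
    static bool is_regional_indicator_sketch(unsigned long c)
    {
        return c >= 0x1F1E6 && c <= 0x1F1FF;
    }

    static bool is_flag_pair_sketch(unsigned long c0, unsigned long c1)
    {
        return is_regional_indicator_sketch(c0) &&
               is_regional_indicator_sketch(c1);
    }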
|
|
|
if (ccount == 2 &&
|
|
|
|
IS_REGIONAL_INDICATOR_LETTER(ch[0]) &&
|
|
|
|
IS_REGIONAL_INDICATOR_LETTER(ch[1]))
|
|
|
|
attr |= ATTR_WIDE | TATTR_COMBINING;
|
2019-03-10 14:39:28 +00:00
|
|
|
win_draw_text(term->win, x, y, ch, ccount, attr, ldata->lattr, tc);
|
|
|
|
if (attr & (TATTR_ACTCURS | TATTR_PASCURS))
|
|
|
|
win_draw_cursor(term->win, x, y, ch, ccount,
|
|
|
|
attr, ldata->lattr, tc);
|
|
|
|
}
|
2019-02-26 07:12:13 +00:00
|
|
|
}
|
|
|
|
|
1999-01-08 13:02:13 +00:00
|
|
|
/*
|
Remove unused and bit-rotted scroll optimisation.
In the very old days, when PuTTY was new and computers were slow, I
tried to implement a feature where scrolling the window would be
implemented using a fast rectangle-copy GDI operation, rather than an
expensive character-by-character redraw of all the changed areas.
It never quite worked right, and I ended up conditioning it out on
Windows, and never even tried to implement it on GTK. It's now been
sitting around unused for so long that I think it's no longer worth
keeping in the code at all - if I tried to put it back in, it surely
wouldn't even compile, and would need rewriting from scratch anyway.
Disturbingly, it looks as if I _tried_ to re-enable it at one point,
in that there was a '#define OPTIMISE_IS_SCROLL 1' in putty.h - but
that never had any effect, because the macro name is misspelled. All
the #ifdefs are for 'OPTIMISE_SCROLL', without the 'IS'. So despite
appearances, it really _has_ been conditioned out all along!
2018-10-25 17:34:39 +00:00
|
|
|
* Given a context, update the window.
|
1999-01-08 13:02:13 +00:00
|
|
|
*/
|
Remove the 'Frontend' type and replace it with a vtable.
After the recent Seat and LogContext revamps, _nearly_ all the
remaining uses of the type 'Frontend' were in terminal.c, which needs
all sorts of interactions with the GUI window the terminal lives in,
from the obvious (actually drawing text on the window, reading and
writing the clipboard) to the obscure (minimising, maximising and
moving the window in response to particular escape sequences).
All of those functions are now provided by an abstraction called
TermWin. The few remaining uses of Frontend after _that_ are internal
to a particular platform directory, so as to spread the implementation
of that particular kind of Frontend between multiple source files; so
I've renamed all of those so that they take a more specifically named
type that refers to the particular implementation rather than the
general abstraction.
So now the name 'Frontend' no longer exists in the code base at all,
and everywhere one used to be used, it's completely clear whether it
was operating in one of Frontend's three abstract roles (and if so,
which), or whether it was specific to a particular implementation.
Another type that's disappeared is 'Context', which used to be a
typedef defined to something different on each platform, describing
whatever short-lived resources were necessary to draw on the terminal
window: the front end would provide a ready-made one when calling
term_paint, and the terminal could request one with get_ctx/free_ctx
if it wanted to do proactive window updates. Now that drawing context
lives inside the TermWin itself, because there was never any need to
have two of those contexts live at the same time.
(Another minor API change is that the window-title functions - both
reading and writing - have had a missing 'const' added to their char *
parameters / return values.)
I don't expect this change to enable any particularly interesting new
functionality (in particular, I have no plans that need more than one
implementation of TermWin in the same application). But it completes
the tidying-up that began with the Seat and LogContext rework.
2018-10-25 17:44:04 +00:00
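The C 'vtable' idiom the message refers to, sketched in miniature. The type
and member names below are simplified assumptions for illustration, not the
actual TermWin declarations:

    /* Sketch: a front end fills in a table of function pointers, and the
     * terminal calls through thin wrappers. Assumes <wchar.h>. */
    typedef struct SketchWin SketchWin;

    typedef struct SketchWinVtable {
        void (*draw_text)(SketchWin *win, int x, int y,
                          const wchar_t *text, int len);
        void (*set_title)(SketchWin *win, const char *title);
    } SketchWinVtable;

    struct SketchWin {
        const SketchWinVtable *vt;  /* each front end supplies its own table */
    };

    static inline void sketchwin_draw_text(SketchWin *win, int x, int y,
                                           const wchar_t *text, int len)
    {
        win->vt->draw_text(win, x, y, text, len);  /* dispatch via vtable */
    }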
|
|
|
static void do_paint(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2004-11-28 09:24:57 +00:00
|
|
|
int i, j, our_curs_y, our_curs_x;
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
2004-10-13 11:50:16 +00:00
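Judging from the fields used elsewhere in this file (chr, attr, truecolour,
cc_next), the per-cell structure is roughly shaped like the sketch below;
the real definition lives in terminal.h and differs in detail:

    /* Sketch: one character cell of the active screen. */
    struct termchar_sketch {
        unsigned long chr;      /* character code plus charset (CSET) bits */
        unsigned long attr;     /* attribute bits: colours, bold, wide, ... */
        truecolour truecolour;  /* optional 24-bit foreground/background */
        int cc_next;            /* relative link to combining chars, 0 = none */
    };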
|
|
|
int rv, cursor;
|
2001-04-16 21:25:13 +00:00
|
|
|
pos scrpos;
|
2004-10-14 16:42:43 +00:00
|
|
|
wchar_t *ch;
|
New array-growing macros: sgrowarray and sgrowarrayn.
The idea of these is that they centralise the common idiom along the
lines of
    if (logical_array_len >= physical_array_size) {
        physical_array_size = logical_array_len * 5 / 4 + 256;
        array = sresize(array, physical_array_size, ElementType);
    }
which happens at a zillion call sites throughout this code base, with
different random choices of the geometric factor and additive
constant, sometimes forgetting them completely, and generally doing a
lot of repeated work.
The new macro sgrowarray(array,size,n) has the semantics: here are the
array pointer and its physical size for you to modify, now please
ensure that the nth element exists, so I can write into it. And
sgrowarrayn(array,size,n,m) is the same except that it ensures that
the array has size at least n+m (so sgrowarray is just the special
case where m=1).
Now that this is a single centralised implementation that will be used
everywhere, I've also gone to more effort in the implementation, with
careful overflow checks that would have been painful to put at all the
previous call sites.
This commit also switches over every use of sresize(), apart from a
few where I really didn't think it would gain anything. A consequence
of that is that a lot of array-size variables have to have their types
changed to size_t, because the macros require that (they address-take
the size to pass to the underlying function).
2019-02-28 20:07:30 +00:00
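A simplified, function-style sketch of that centralised idiom. The real
sgrowarray/sgrowarrayn are macros with careful overflow checks and
abort-on-failure allocation; the name and details below are illustrative
only:

    #include <stdlib.h>

    /* Sketch: make sure element n exists, growing geometrically plus a
     * constant so repeated appends stay cheap. Overflow and allocation
     * failure handling are deliberately omitted here. */
    static void *grow_array_sketch(void *array, size_t *size,
                                   size_t n, size_t eltsize)
    {
        if (n >= *size) {
            *size = n * 5 / 4 + 256;
            array = realloc(array, *size * eltsize);
        }
        return array;
    }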
|
|
|
size_t chlen;
|
2004-12-17 11:37:16 +00:00
|
|
|
termchar *newline;
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2004-10-14 16:42:43 +00:00
|
|
|
chlen = 1024;
|
|
|
|
ch = snewn(chlen, wchar_t);
|
|
|
|
|
2004-12-17 11:37:16 +00:00
|
|
|
newline = snewn(term->cols, termchar);
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
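/* rv reverses the whole screen when exactly one of global reverse-video
 * mode and an in-progress visual bell asks for it. */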
rv = (!term->rvideo ^ !term->in_vbell ? ATTR_REVERSE : 0);
|
2001-05-10 08:34:20 +00:00
|
|
|
|
2001-04-13 10:52:36 +00:00
|
|
|
/* Depends on:
|
|
|
|
* screen array, disptop, scrtop,
|
2019-09-08 19:29:00 +00:00
|
|
|
* selection, rv,
|
|
|
|
* blinkpc, blink_is_real, tblinker,
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
|
|
|
* curs.y, curs.x, cblinker, blink_cur, cursor_on, has_focus, wrapnext
|
2001-04-13 10:52:36 +00:00
|
|
|
*/
|
2001-05-10 08:34:20 +00:00
|
|
|
|
|
|
|
/* Has the cursor position or type changed ? */
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->cursor_on) {
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->has_focus) {
|
|
|
|
if (term->cblinker || !term->blink_cur)
|
|
|
|
cursor = TATTR_ACTCURS;
|
|
|
|
else
|
|
|
|
cursor = 0;
|
|
|
|
} else
|
|
|
|
cursor = TATTR_PASCURS;
|
|
|
|
if (term->wrapnext)
|
|
|
|
cursor |= TATTR_RIGHTCURS;
|
2001-05-06 14:35:20 +00:00
|
|
|
} else
|
2019-09-08 19:29:00 +00:00
|
|
|
cursor = 0;
|
2002-10-22 16:11:33 +00:00
|
|
|
our_curs_y = term->curs.y - term->disptop;
|
2003-01-02 16:20:29 +00:00
|
|
|
{
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* Adjust the cursor position:
|
|
|
|
* - for bidi
|
|
|
|
* - in the case where it's resting on the right-hand half
|
|
|
|
* of a CJK wide character. xterm's behaviour here,
|
|
|
|
* which seems adequate to me, is to display the cursor
|
|
|
|
* covering the _whole_ character, exactly as if it were
|
|
|
|
* one space to the left.
|
|
|
|
*/
|
|
|
|
termline *ldata = lineptr(term->curs.y);
|
|
|
|
termchar *lchars;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
our_curs_x = term->curs.x;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if ( (lchars = term_bidi_line(term, ldata, our_curs_y)) != NULL) {
|
|
|
|
our_curs_x = term->post_bidi_cache[our_curs_y].forward[our_curs_x];
|
|
|
|
} else
|
|
|
|
lchars = ldata->chars;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (our_curs_x > 0 &&
|
|
|
|
lchars[our_curs_x].chr == UCSWIDE)
|
|
|
|
our_curs_x--;
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
unlineptr(ldata);
|
2003-01-02 16:20:29 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
/*
|
|
|
|
* If the cursor is not where it was last time we painted, and
|
|
|
|
* its previous position is visible on screen, invalidate its
|
|
|
|
* previous position.
|
|
|
|
*/
|
|
|
|
if (term->dispcursy >= 0 &&
|
2019-09-08 19:29:00 +00:00
|
|
|
(term->curstype != cursor ||
|
|
|
|
term->dispcursy != our_curs_y ||
|
|
|
|
term->dispcursx != our_curs_x)) {
|
|
|
|
termchar *dispcurs = term->disptext[term->dispcursy]->chars +
|
|
|
|
term->dispcursx;
|
2004-10-13 11:50:16 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->dispcursx > 0 && dispcurs->chr == UCSWIDE)
|
|
|
|
dispcurs[-1].attr |= ATTR_INVALID;
|
|
|
|
if (term->dispcursx < term->cols-1 && dispcurs[1].chr == UCSWIDE)
|
|
|
|
dispcurs[1].attr |= ATTR_INVALID;
|
|
|
|
dispcurs->attr |= ATTR_INVALID;
|
2004-10-13 11:50:16 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
term->curstype = 0;
|
2001-05-10 08:34:20 +00:00
|
|
|
}
|
2004-10-13 11:50:16 +00:00
|
|
|
term->dispcursx = term->dispcursy = -1;
|
2001-05-10 08:34:20 +00:00
|
|
|
|
|
|
|
/* The normal screen data */
|
2002-10-22 16:11:33 +00:00
|
|
|
for (i = 0; i < term->rows; i++) {
|
2019-09-08 19:29:00 +00:00
|
|
|
termline *ldata;
|
|
|
|
termchar *lchars;
|
|
|
|
bool dirty_line, dirty_run, selected;
|
|
|
|
unsigned long attr = 0, cset = 0;
|
|
|
|
int start = 0;
|
|
|
|
int ccount = 0;
|
|
|
|
bool last_run_dirty = false;
|
|
|
|
int laststart;
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that those variables are bool and TRUE becomes
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
bool dirtyrect;
|
2019-09-08 19:29:00 +00:00
|
|
|
int *backward;
|
2017-09-30 16:32:32 +00:00
|
|
|
truecolour tc;
|
2001-05-10 08:34:20 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
scrpos.y = i + term->disptop;
|
|
|
|
ldata = lineptr(scrpos.y);
|
|
|
|
|
|
|
|
/* Do Arabic shaping and bidi. */
|
|
|
|
lchars = term_bidi_line(term, ldata, i);
|
|
|
|
if (lchars) {
|
|
|
|
backward = term->post_bidi_cache[i].backward;
|
|
|
|
} else {
|
|
|
|
lchars = ldata->chars;
|
|
|
|
backward = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* First loop: work along the line deciding what we want
|
|
|
|
* each character cell to look like.
|
|
|
|
*/
|
|
|
|
for (j = 0; j < term->cols; j++) {
|
|
|
|
unsigned long tattr, tchar;
|
|
|
|
termchar *d = lchars + j;
|
|
|
|
scrpos.x = backward ? backward[j] : j;
|
|
|
|
|
|
|
|
tchar = d->chr;
|
|
|
|
tattr = d->attr;
|
2004-10-13 11:50:16 +00:00
|
|
|
|
2011-07-14 18:52:21 +00:00
|
|
|
if (!term->ansi_colour)
|
2019-09-08 19:29:00 +00:00
|
|
|
tattr = (tattr & ~(ATTR_FGMASK | ATTR_BGMASK)) |
|
2022-08-03 19:48:46 +00:00
|
|
|
ATTR_DEFFG | ATTR_DEFBG;
|
2004-11-09 17:57:32 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
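/* If the front end can't do xterm 256-colour mode, map palette indices
 * 16-255 back to the default foreground/background. */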
if (!term->xterm_256_colour) {
|
|
|
|
int colour;
|
|
|
|
colour = (tattr & ATTR_FGMASK) >> ATTR_FGSHIFT;
|
|
|
|
if (colour >= 16 && colour < 256)
|
|
|
|
tattr = (tattr &~ ATTR_FGMASK) | ATTR_DEFFG;
|
|
|
|
colour = (tattr & ATTR_BGMASK) >> ATTR_BGSHIFT;
|
|
|
|
if (colour >= 16 && colour < 256)
|
|
|
|
tattr = (tattr &~ ATTR_BGMASK) | ATTR_DEFBG;
|
|
|
|
}
|
2004-11-28 15:13:34 +00:00
|
|
|
|
2017-10-05 19:27:27 +00:00
|
|
|
if (term->true_colour) {
|
|
|
|
tc = d->truecolour;
|
|
|
|
} else {
|
|
|
|
tc.fg = tc.bg = optionalrgb_none;
|
|
|
|
}
|
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
switch (tchar & CSET_MASK) {
|
|
|
|
case CSET_ASCII:
|
|
|
|
tchar = term->ucsdata->unitab_line[tchar & 0xFF];
|
|
|
|
break;
|
|
|
|
case CSET_LINEDRW:
|
|
|
|
tchar = term->ucsdata->unitab_xterm[tchar & 0xFF];
|
|
|
|
break;
|
|
|
|
case CSET_SCOACS:
|
|
|
|
tchar = term->ucsdata->unitab_scoacs[tchar&0xFF];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (j < term->cols-1 && d[1].chr == UCSWIDE)
|
|
|
|
tattr |= ATTR_WIDE;
|
|
|
|
|
|
|
|
/* Video reversing things */
|
|
|
|
if (term->selstate == DRAGGING || term->selstate == SELECTED) {
|
|
|
|
if (term->seltype == LEXICOGRAPHIC)
|
|
|
|
selected = (posle(term->selstart, scrpos) &&
|
|
|
|
poslt(scrpos, term->selend));
|
|
|
|
else
|
|
|
|
selected = (posPle(term->selstart, scrpos) &&
|
|
|
|
posPle_left(scrpos, term->selend));
|
|
|
|
} else
|
|
|
|
selected = false;
|
|
|
|
tattr = (tattr ^ rv
|
|
|
|
^ (selected ? ATTR_REVERSE : 0));
|
|
|
|
|
|
|
|
/* 'Real' blinking ? */
|
|
|
|
if (term->blink_is_real && (tattr & ATTR_BLINK)) {
|
|
|
|
if (term->has_focus && term->tblinker) {
|
|
|
|
tchar = term->ucsdata->unitab_line[(unsigned char)' '];
|
|
|
|
}
|
|
|
|
tattr &= ~ATTR_BLINK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the font we'll _probably_ be using to see if
|
|
|
|
* the character is wide when we don't want it to be.
|
|
|
|
*/
|
|
|
|
if (tchar != term->disptext[i]->chars[j].chr ||
|
|
|
|
tattr != (term->disptext[i]->chars[j].attr &~
|
|
|
|
(ATTR_NARROW | DATTR_MASK))) {
|
|
|
|
if ((tattr & ATTR_WIDE) == 0 &&
|
2018-10-25 17:44:04 +00:00
|
|
|
win_char_width(term->win, tchar) == 2)
|
2019-09-08 19:29:00 +00:00
|
|
|
tattr |= ATTR_NARROW;
|
|
|
|
} else if (term->disptext[i]->chars[j].attr & ATTR_NARROW)
|
|
|
|
tattr |= ATTR_NARROW;
|
|
|
|
|
|
|
|
if (i == our_curs_y && j == our_curs_x) {
|
|
|
|
tattr |= cursor;
|
|
|
|
term->curstype = cursor;
|
|
|
|
term->dispcursx = j;
|
|
|
|
term->dispcursy = i;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* FULL-TERMCHAR */
|
|
|
|
newline[j].attr = tattr;
|
|
|
|
newline[j].chr = tchar;
|
|
|
|
newline[j].truecolour = tc;
|
|
|
|
/* Combining characters are still read from lchars */
|
|
|
|
newline[j].cc_next = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now loop over the line again, noting where things have
|
|
|
|
* changed.
|
|
|
|
*
|
|
|
|
* During this loop, we keep track of where we last saw
|
|
|
|
* DATTR_STARTRUN. Any mismatch automatically invalidates
|
|
|
|
* _all_ of the containing run that was last printed: that
|
|
|
|
* is, any rectangle that was drawn in one go in the
|
|
|
|
* previous update should be either left completely alone
|
|
|
|
* or overwritten in its entirety. This, along with the
|
|
|
|
* expectation that front ends clip all text runs to their
|
|
|
|
* bounding rectangle, should solve any possible problems
|
|
|
|
* with fonts that overflow their character cells.
|
|
|
|
*/
|
|
|
|
laststart = 0;
|
|
|
|
dirtyrect = false;
|
|
|
|
for (j = 0; j < term->cols; j++) {
|
|
|
|
if (term->disptext[i]->chars[j].attr & DATTR_STARTRUN) {
|
|
|
|
laststart = j;
|
|
|
|
dirtyrect = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (term->disptext[i]->chars[j].chr != newline[j].chr ||
|
|
|
|
(term->disptext[i]->chars[j].attr &~ DATTR_MASK)
|
|
|
|
!= newline[j].attr) {
|
|
|
|
int k;
|
|
|
|
|
|
|
|
if (!dirtyrect) {
|
|
|
|
for (k = laststart; k < j; k++)
|
|
|
|
term->disptext[i]->chars[k].attr |= ATTR_INVALID;
|
|
|
|
|
|
|
|
dirtyrect = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dirtyrect)
|
|
|
|
term->disptext[i]->chars[j].attr |= ATTR_INVALID;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Finally, loop once more and actually do the drawing.
|
|
|
|
*/
|
|
|
|
dirty_run = dirty_line = (ldata->lattr !=
|
|
|
|
term->disptext[i]->lattr);
|
|
|
|
term->disptext[i]->lattr = ldata->lattr;
|
|
|
|
|
|
|
|
tc = term->erase_char.truecolour;
|
|
|
|
for (j = 0; j < term->cols; j++) {
|
|
|
|
unsigned long tattr, tchar;
|
2024-05-06 10:07:12 +00:00
|
|
|
bool break_run, do_copy, next_run_dirty = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
termchar *d = lchars + j;
|
|
|
|
|
|
|
|
tattr = newline[j].attr;
|
|
|
|
tchar = newline[j].chr;
|
|
|
|
|
|
|
|
if ((term->disptext[i]->chars[j].attr ^ tattr) & ATTR_WIDE)
|
|
|
|
dirty_line = true;
|
|
|
|
|
|
|
|
break_run = ((tattr ^ attr) & term->attr_mask) != 0;
|
2001-05-10 08:34:20 +00:00
|
|
|
|
2017-09-30 16:32:32 +00:00
|
|
|
if (!truecolour_equal(newline[j].truecolour, tc))
|
2018-10-29 19:50:29 +00:00
|
|
|
break_run = true;
|
2017-09-30 16:32:32 +00:00
|
|
|
|
2012-04-22 14:22:10 +00:00
|
|
|
#ifdef USES_VTLINE_HACK
|
2019-09-08 19:29:00 +00:00
|
|
|
/* Special hack for VT100 Linedraw glyphs */
|
|
|
|
if ((tchar >= 0x23BA && tchar <= 0x23BD) ||
|
2011-07-16 11:13:00 +00:00
|
|
|
(j > 0 && (newline[j-1].chr >= 0x23BA &&
|
|
|
|
newline[j-1].chr <= 0x23BD)))
|
2019-09-08 19:29:00 +00:00
|
|
|
break_run = true;
|
2012-04-22 14:22:10 +00:00
|
|
|
#endif
|
2004-10-13 11:50:16 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* Separate out sequences of characters that have the
|
|
|
|
* same CSET, if that CSET is a magic one.
|
|
|
|
*/
|
|
|
|
if (CSET_OF(tchar) != cset)
|
|
|
|
break_run = true;
|
2001-05-10 08:34:20 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* Break on both sides of any combined-character cell.
|
|
|
|
*/
|
|
|
|
if (d->cc_next != 0 ||
|
|
|
|
(j > 0 && d[-1].cc_next != 0))
|
|
|
|
break_run = true;
|
2004-10-14 16:42:43 +00:00
|
|
|
|
2024-05-06 10:07:12 +00:00
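For reference, the regional-indicator test used by the code below boils down to a single range check on the code point. A minimal sketch, assuming the obvious definition (the real IS_REGIONAL_INDICATOR_LETTER macro lives in a PuTTY header and may be spelled differently):

#define IS_REGIONAL_INDICATOR_LETTER(c) \
    ((unsigned long)(c) >= 0x1F1E6 && (unsigned long)(c) <= 0x1F1FF)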
            /*
             * Break on both sides of a regional indicator letter.
             */
            if (IS_REGIONAL_INDICATOR_LETTER(tchar)) {
                break_run = true;
                if (j+1 < term->cols) {
                    /* Also, check if there are any changes to whether or
                     * not we're drawing this and the next character as a
                     * single flag glyph. */
                    bool flag_now = IS_REGIONAL_INDICATOR_LETTER(d[1].chr);
                    bool flag_before = (
                        IS_REGIONAL_INDICATOR_LETTER(
                            term->disptext[i]->chars[j].chr) &&
                        IS_REGIONAL_INDICATOR_LETTER(
                            term->disptext[i]->chars[j+1].chr) &&
                        (term->disptext[i]->chars[j].attr & DATTR_STARTRUN));
                    if (flag_now != flag_before)
                        next_run_dirty = true;   /* must redraw this flag */
                }
            } else if (j > 0 && IS_REGIONAL_INDICATOR_LETTER(d[-1].chr)) {
                break_run = true;
            }
Add a per-line 'trusted' status in Terminal.
This indicates that a line contains trusted information (originated by
PuTTY) or untrusted (from the server). Trusted lines are prefixed by a
three-column signature consisting of the trust sigil (i.e. PuTTY icon)
and a separating space.
To protect against a server using escape sequences to move the cursor
back up to a trusted line and overwrite its contents, any attempt to
write to a termline is preceded by a call to check_trust_status(),
which clears the line completely if the terminal's current trust
status is different from the previous state of that line.
In the terminal data structures, the trust sigil is represented by
0xDFFE (an otherwise unused value, because it's in the surrogate
space). For bidi purposes I've arranged to treat that value as
direction-neutral, so that it will appear on the right if a terminal
line needs it to. (Not that that's currently likely to happen, with
PuTTY not being properly localised, but it's a bit of futureproofing.)
The bidi system is also where I actually insert the trust sigil: the
_logical_ terminal data structures don't include it. term_bidi_line
was a convenient place to add it, because that function was already
transforming a logical terminal line into a physical one in a way that
also generates a logical<->physical mapping table for handling mouse
clicks and cursor positioning; so that function now adds the trust
sigil as well as running the bidi algorithm.
(A knock-on effect of _that_ is that the log<->phys position map now
has to have a value for 'no correspondence', because if the user does
click on the trust sigil, there's no logical terminal position
corresponding to that. So the map can now contain the special value
BIDI_CHAR_INDEX_NONE, and anyone looking things up in it has to be
prepared to receive that as an answer.)
Of course, this terminal-data transformation can't be kept _wholly_
within term_bidi_line, because unlike proper bidi, it actually reduces
the number of visible columns on the line. So the wrapping code
(during glyph display and also copy and paste) has to take account of
the trusted status and use it to ignore the last 3 columns of the
line. This is probably not done absolutely perfectly, but then, it
doesn't need to be - trusted lines will be filled with well-controlled
data generated from the SSH code, which won't be doing every trick in
the book with escape sequences. Only untrusted terminal lines will be
using all the terminal's capabilities, and they don't have this sigil
getting in the way.
2019-03-10 14:39:28 +00:00
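For orientation, the two constants the code below relies on can be pictured as follows; the values come from the commit message above, but these lines are a sketch rather than the real definitions in terminal.h:

#define TRUST_SIGIL_CHAR   0xDFFE  /* sigil code point, parked in surrogate space */
#define TRUST_SIGIL_WIDTH  3       /* sigil glyph plus separating space */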
            /*
             * Break on both sides of a trust sigil.
             */
            if (d->chr == TRUST_SIGIL_CHAR ||
                (j >= 2 && d[-1].chr == UCSWIDE &&
                 d[-2].chr == TRUST_SIGIL_CHAR))
                break_run = true;

            if (!term->ucsdata->dbcs_screenfont && !dirty_line) {
                if (term->disptext[i]->chars[j].chr == tchar &&
Fix terminal redraw slowdown in presence of true colour.
When do_paint breaks up a line of terminal text into contiguous runs
of characters to treat the same, one of the criteria it uses is, 'Does
this character even need redrawing? (or is it already displayed
correctly from the previous redraw?)' When we encounter a character
that matches its previous value, we end the previous run of
characters, so that we can skip the one we've just encountered.
That check was not taking account of the 'truecolour' field of the
termchar it was checking. So it would sometimes falsely believe the
character to be equivalent to its previously drawn value, even when in
fact it was not, and hence insert a run break, anticipating that the
previous character needed drawing and the current one did not.
This didn't cause a _wrong_ redraw, because there's a separate loop
further on which re-checks whether to actually draw things, which
didn't make the same error. So the character that loop #1 thought
didn't need a redraw, loop #2 knew _did_ need a redraw, and hence,
everything did get redrawn.
But by the time loop #2 is running, it's too late to change the run
boundaries. So everything does get redrawn, but in much smaller chunks
than it could have been. The net effect was that if the screen was
filled with text displayed in true colour, and you changed it to the
_same_ text in a different colour, then the whole terminal would be
redrawn in one-character increments instead of the usual behaviour of
folding together runs that can be drawn in one go.
Thanks to Bradley Smith for debugging this very confusing issue!
2021-07-15 19:02:55 +00:00
                    (term->disptext[i]->chars[j].attr &~ DATTR_MASK) == tattr &&
                    truecolour_equal(
                        term->disptext[i]->chars[j].truecolour, tc))
                    break_run = true;
                else if (!dirty_run && ccount == 1)
                    break_run = true;
            }
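The truecolour check that the fix described above adds to this comparison is a field-by-field equality test on the foreground and background colour records. A sketch only, since the field names here are an assumption rather than the real definition:

static inline bool truecolour_equal(truecolour a, truecolour b)
{
    return (a.fg.enabled == b.fg.enabled &&
            a.fg.r == b.fg.r && a.fg.g == b.fg.g && a.fg.b == b.fg.b &&
            a.bg.enabled == b.bg.enabled &&
            a.bg.r == b.bg.r && a.bg.g == b.bg.g && a.bg.b == b.bg.b);
}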
            if (break_run) {
                if ((dirty_run || last_run_dirty) && ccount > 0)
                    do_paint_draw(term, ldata, start, i, ch, ccount, attr, tc);
                start = j;
                ccount = 0;
                attr = tattr;
                tc = newline[j].truecolour;
                cset = CSET_OF(tchar);
                if (term->ucsdata->dbcs_screenfont)
                    last_run_dirty = dirty_run;
                dirty_run = dirty_line || next_run_dirty;
            }
            do_copy = false;
            if (!termchars_equal_override(&term->disptext[i]->chars[j],
                                          d, tchar, tattr)) {
                do_copy = true;
                dirty_run = true;
            }
New array-growing macros: sgrowarray and sgrowarrayn.
The idea of these is that they centralise the common idiom along the
lines of
if (logical_array_len >= physical_array_size) {
physical_array_size = logical_array_len * 5 / 4 + 256;
array = sresize(array, physical_array_size, ElementType);
}
which happens at a zillion call sites throughout this code base, with
different random choices of the geometric factor and additive
constant, sometimes forgetting them completely, and generally doing a
lot of repeated work.
The new macro sgrowarray(array,size,n) has the semantics: here are the
array pointer and its physical size for you to modify, now please
ensure that the nth element exists, so I can write into it. And
sgrowarrayn(array,size,n,m) is the same except that it ensures that
the array has size at least n+m (so sgrowarray is just the special
case where m=1).
Now that this is a single centralised implementation that will be used
everywhere, I've also gone to more effort in the implementation, with
careful overflow checks that would have been painful to put at all the
previous call sites.
This commit also switches over every use of sresize(), apart from a
few where I really didn't think it would gain anything. A consequence
of that is that a lot of array-size variables have to have their types
changed to size_t, because the macros require that (they address-take
the size to pass to the underlying function).
2019-02-28 20:07:30 +00:00
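Concretely, the sgrowarrayn() call just below replaces the open-coded idiom quoted in the message. Under the stated semantics the two forms below do the same job (growth factor as quoted above; the real macro additionally guards against size_t overflow):

/* Old-style call site, one of many hand-rolled variants: */
if (ccount + 2 > chlen) {
    chlen = (ccount + 2) * 5 / 4 + 256;
    ch = sresize(ch, chlen, wchar_t);
}

/* New centralised form: ensure ch[ccount] and ch[ccount+1] exist. */
sgrowarrayn(ch, chlen, ccount, 2);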
            sgrowarrayn(ch, chlen, ccount, 2);
#ifdef PLATFORM_IS_UTF16
            if (tchar > 0x10000 && tchar < 0x110000) {
                ch[ccount++] = (wchar_t) HIGH_SURROGATE_OF(tchar);
                ch[ccount++] = (wchar_t) LOW_SURROGATE_OF(tchar);
            } else
#endif /* PLATFORM_IS_UTF16 */
                ch[ccount++] = (wchar_t) tchar;

            if (d->cc_next) {
                termchar *dd = d;

                while (dd->cc_next) {
                    unsigned long schar;

                    dd += dd->cc_next;

                    schar = dd->chr;
                    switch (schar & CSET_MASK) {
                      case CSET_ASCII:
                        schar = term->ucsdata->unitab_line[schar & 0xFF];
                        break;
                      case CSET_LINEDRW:
                        schar = term->ucsdata->unitab_xterm[schar & 0xFF];
                        break;
                      case CSET_SCOACS:
                        schar = term->ucsdata->unitab_scoacs[schar & 0xFF];
                        break;
                    }
                    sgrowarrayn(ch, chlen, ccount, 2);
#ifdef PLATFORM_IS_UTF16
                    if (schar > 0x10000 && schar < 0x110000) {
                        ch[ccount++] = (wchar_t) HIGH_SURROGATE_OF(schar);
                        ch[ccount++] = (wchar_t) LOW_SURROGATE_OF(schar);
                    } else
#endif /* PLATFORM_IS_UTF16 */
                        ch[ccount++] = (wchar_t) schar;
                }

                attr |= TATTR_COMBINING;
            }
            if (do_copy) {
                copy_termchar(term->disptext[i], j, d);
                term->disptext[i]->chars[j].chr = tchar;
                term->disptext[i]->chars[j].attr = tattr;
                term->disptext[i]->chars[j].truecolour = tc;
                if (start == j)
                    term->disptext[i]->chars[j].attr |= DATTR_STARTRUN;
            }

            /* If it's a wide char step along to the next one. */
            if (tattr & ATTR_WIDE) {
                if (++j < term->cols) {
                    d++;
                    /*
                     * By construction above, the cursor should not
                     * be on the right-hand half of this character.
                     * Ever.
                     */
                    assert(!(i == our_curs_y && j == our_curs_x));
                    if (!termchars_equal(&term->disptext[i]->chars[j], d))
                        dirty_run = true;
                    copy_termchar(term->disptext[i], j, d);
                }
            }
            /* If it's a regional indicator letter, and so is the next
             * one, then also step to the next one, keeping the flag
             * sequence together. */
            if (IS_REGIONAL_INDICATOR_LETTER(d->chr) &&
                (j+1 < term->cols && IS_REGIONAL_INDICATOR_LETTER(d[1].chr))) {
                j++;
                d++;

                /* Set ATTR_WIDE, so that the pair is displayed as one */
                attr |= ATTR_WIDE;

                /* Include the second letter in the text buffer */
                unsigned long rchar = d->chr;
#ifdef PLATFORM_IS_UTF16
                sgrowarrayn(ch, chlen, ccount, 2);
                ch[ccount++] = (wchar_t)HIGH_SURROGATE_OF(rchar);
                ch[ccount++] = (wchar_t)LOW_SURROGATE_OF(rchar);
#else
                sgrowarrayn(ch, chlen, ccount, 1);
                ch[ccount++] = (wchar_t)rchar;
#endif

                /* Display the cursor, if it's on the right half */
                if (i == our_curs_y && j == our_curs_x) {
                    attr |= cursor;
                    term->disptext[i]->chars[j-1].attr |= cursor;
                }

                if (!termchars_equal_override(
                        &term->disptext[i]->chars[j],
                        d, rchar, term->disptext[i]->chars[j-1].attr))
                    dirty_run = true;

                copy_termchar(term->disptext[i], j, d);
                term->disptext[i]->chars[j].attr =
                    term->disptext[i]->chars[j-1].attr & ~DATTR_STARTRUN;
            }
        }

        if (dirty_run && ccount > 0)
            do_paint_draw(term, ldata, start, i, ch, ccount, attr, tc);

        unlineptr(ldata);
    }

    sfree(newline);
    sfree(ch);
}

/*
 * Invalidate the whole screen so it will be repainted in full.
 */
void term_invalidate(Terminal *term)
{
int i, j;
    for (i = 0; i < term->rows; i++)
        for (j = 0; j < term->cols; j++)
            term->disptext[i]->chars[j].attr |= ATTR_INVALID;

    term_schedule_update(term);
}

/*
 * Paint the window in response to a WM_PAINT message.
 */
Remove the 'Frontend' type and replace it with a vtable.
After the recent Seat and LogContext revamps, _nearly_ all the
remaining uses of the type 'Frontend' were in terminal.c, which needs
all sorts of interactions with the GUI window the terminal lives in,
from the obvious (actually drawing text on the window, reading and
writing the clipboard) to the obscure (minimising, maximising and
moving the window in response to particular escape sequences).
All of those functions are now provided by an abstraction called
TermWin. The few remaining uses of Frontend after _that_ are internal
to a particular platform directory, so as to spread the implementation
of that particular kind of Frontend between multiple source files; so
I've renamed all of those so that they take a more specifically named
type that refers to the particular implementation rather than the
general abstraction.
So now the name 'Frontend' no longer exists in the code base at all,
and everywhere one used to be used, it's completely clear whether it
was operating in one of Frontend's three abstract roles (and if so,
which), or whether it was specific to a particular implementation.
Another type that's disappeared is 'Context', which used to be a
typedef defined to something different on each platform, describing
whatever short-lived resources were necessary to draw on the terminal
window: the front end would provide a ready-made one when calling
term_paint, and the terminal could request one with get_ctx/free_ctx
if it wanted to do proactive window updates. Now that drawing context
lives inside the TermWin itself, because there was never any need to
have two of those contexts live at the same time.
(Another minor API change is that the window-title functions - both
reading and writing - have had a missing 'const' added to their char *
parameters / return values.)
I don't expect this change to enable any particularly interesting new
functionality (in particular, I have no plans that need more than one
implementation of TermWin in the same application). But it completes
the tidying-up that began with the Seat and LogContext rework.
2018-10-25 17:44:04 +00:00
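The TermWin abstraction mentioned above follows the usual vtable pattern in this code base. The sketch below illustrates only the shape of that pattern; the struct layout and method names are invented for illustration and are not the real TermWin interface:

typedef struct TermWin TermWin;

typedef struct TermWinVtable {
    /* illustrative methods only */
    void (*draw_text)(TermWin *win, int x, int y,
                      wchar_t *text, int len, unsigned long attr);
    void (*set_title)(TermWin *win, const char *title);
} TermWinVtable;

struct TermWin {
    const TermWinVtable *vt;
};

/* callers go through small wrappers that dispatch via the vtable */
static inline void win_set_title(TermWin *win, const char *title)
{
    win->vt->set_title(win, title);
}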
void term_paint(Terminal *term,
                int left, int top, int right, int bottom, bool immediately)
{
    int i, j;

    if (left < 0) left = 0;
    if (top < 0) top = 0;
    if (right >= term->cols) right = term->cols-1;
    if (bottom >= term->rows) bottom = term->rows-1;

    for (i = top; i <= bottom && i < term->rows; i++) {
        if ((term->disptext[i]->lattr & LATTR_MODE) == LATTR_NORM)
            for (j = left; j <= right && j < term->cols; j++)
                term->disptext[i]->chars[j].attr |= ATTR_INVALID;
        else
            for (j = left / 2; j <= right / 2 + 1 && j < term->cols; j++)
                term->disptext[i]->chars[j].attr |= ATTR_INVALID;
    }

    if (immediately) {
        do_paint(term);
    } else {
        term_schedule_update(term);
    }
}

/*
 * Attempt to scroll the scrollback. The second parameter gives the
 * position we want to scroll to; the first is +1 to denote that
 * this position is relative to the beginning of the scrollback, -1
 * to denote it is relative to the end, and 0 to denote that it is
 * relative to the current position.
 */
void term_scroll(Terminal *term, int rel, int where)
{
    int sbtop = -sblines(term);

    term->disptop = (rel < 0 ? 0 : rel > 0 ? sbtop : term->disptop) + where;
    if (term->disptop < sbtop)
        term->disptop = sbtop;
    if (term->disptop > 0)
        term->disptop = 0;
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
    term->win_scrollbar_update_pending = true;
    term_schedule_update(term);
}
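For example, the rel/where semantics described in the comment above give:

term_scroll(term, +1, 0);    /* jump to the very top of the scrollback */
term_scroll(term, -1, 0);    /* jump back to the bottom (the live screen) */
term_scroll(term,  0, -1);   /* move up one line from the current position */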

/*
 * Scroll the scrollback to centre it on the beginning or end of the
 * current selection, if any.
 */
void term_scroll_to_selection(Terminal *term, int which_end)
{
    pos target;
    int y;
    int sbtop = -sblines(term);

    if (term->selstate != SELECTED)
        return;
    if (which_end)
        target = term->selend;
    else
        target = term->selstart;

    y = target.y - term->rows/2;
    if (y < sbtop)
        y = sbtop;
    else if (y > 0)
        y = 0;
    term_scroll(term, -1, y);
}

/*
 * Helper routine for clipme(): growing buffer.
 */
typedef struct {
    size_t bufsize;      /* amount of allocated space in textbuf/attrbuf */
    size_t bufpos;       /* amount of actual data */
    wchar_t *textbuf;    /* buffer for copied text */
    wchar_t *textptr;    /* = textbuf + bufpos (current insertion point) */
    int *attrbuf;        /* buffer for copied attributes */
    int *attrptr;        /* = attrbuf + bufpos */
    truecolour *tcbuf;   /* buffer for copied colours */
    truecolour *tcptr;   /* = tcbuf + bufpos */
} clip_workbuf;

static void clip_addchar(clip_workbuf *b, wchar_t chr, int attr, truecolour tc)
{
    if (b->bufpos >= b->bufsize) {
        sgrowarray(b->textbuf, b->bufsize, b->bufpos);
        b->textptr = b->textbuf + b->bufpos;
        b->attrbuf = sresize(b->attrbuf, b->bufsize, int);
        b->attrptr = b->attrbuf + b->bufpos;
        b->tcbuf = sresize(b->tcbuf, b->bufsize, truecolour);
        b->tcptr = b->tcbuf + b->bufpos;
    }
    *b->textptr++ = chr;
    *b->attrptr++ = attr;
    *b->tcptr++ = tc;
    b->bufpos++;
}
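Within clipme() below, each selected cell ends up appended through this helper, roughly as follows (a sketch of the call pattern, not a verbatim quote of the loop):

for (p = cbuf; *p; p++)
    clip_addchar(&buf, *p, attr, tc);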
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a, b, c were int and TRUE was just a
macro for 1; but now that those variables are bool and TRUE has become
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00

static void clipme(Terminal *term, pos top, pos bottom, bool rect, bool desel,
                   const int *clipboards, int n_clipboards)
{
    clip_workbuf buf;
    int old_top_x;
    int attr;
    truecolour tc;
    buf.bufsize = 5120;
    buf.bufpos = 0;
    buf.textptr = buf.textbuf = snewn(buf.bufsize, wchar_t);
    buf.attrptr = buf.attrbuf = snewn(buf.bufsize, int);
    buf.tcptr = buf.tcbuf = snewn(buf.bufsize, truecolour);

    old_top_x = top.x;                 /* needed for rect==1 */
    while (poslt(top, bottom)) {
        bool nl = false;
        termline *ldata = lineptr(top.y);
        pos nlpos;

        /*
         * nlpos will point at the maximum position on this line we
         * should copy up to. So we start it at the end of the
         * line...
         */
        nlpos.y = top.y;
        nlpos.x = term->cols;

        /*
         * ... move it backwards if there's unused space at the end
         * of the line (and also set `nl' if this is the case,
         * because in normal selection mode this means we need a
         * newline at the end)...
         */
        if (!(ldata->lattr & LATTR_WRAPPED)) {
            while (nlpos.x &&
                   IS_SPACE_CHR(ldata->chars[nlpos.x - 1].chr) &&
                   !ldata->chars[nlpos.x - 1].cc_next &&
                   poslt(top, nlpos))
                decpos(nlpos);
            if (poslt(nlpos, bottom))
                nl = true;
        } else {
            if (ldata->trusted) {
                /* A wrapped line with a trust sigil on it terminates
                 * a few characters earlier. */
                nlpos.x = (nlpos.x < TRUST_SIGIL_WIDTH ? 0 :
                           nlpos.x - TRUST_SIGIL_WIDTH);
            }
            if (ldata->lattr & LATTR_WRAPPED2) {
                /* Ignore the last char on the line in a WRAPPED2 line. */
                decpos(nlpos);
            }
        }

        /*
         * ... and then clip it to the terminal x coordinate if
         * we're doing rectangular selection. (In this case we
         * still did the above, so that copying e.g. the right-hand
         * column from a table doesn't fill with spaces on the
         * right.)
         */
        if (rect) {
            if (nlpos.x > bottom.x)
                nlpos.x = bottom.x;
            nl = (top.y < bottom.y);
        }
        while (poslt(top, bottom) && poslt(top, nlpos)) {
            wchar_t cbuf[16], *p;
            int c;
            int x = top.x;

            if (ldata->chars[x].chr == UCSWIDE) {
                top.x++;
                continue;
            }

            while (1) {
                int uc = ldata->chars[x].chr;
                attr = ldata->chars[x].attr;
                tc = ldata->chars[x].truecolour;

                switch (uc & CSET_MASK) {
                  case CSET_LINEDRW:
                    if (!term->rawcnp) {
                        uc = term->ucsdata->unitab_xterm[uc & 0xFF];
                        break;
                    }
                  case CSET_ASCII:
                    uc = term->ucsdata->unitab_line[uc & 0xFF];
                    break;
                  case CSET_SCOACS:
                    uc = term->ucsdata->unitab_scoacs[uc & 0xFF];
                    break;
                }
                switch (uc & CSET_MASK) {
                  case CSET_ACP:
                    uc = term->ucsdata->unitab_font[uc & 0xFF];
                    break;
                  case CSET_OEMCP:
                    uc = term->ucsdata->unitab_oemcp[uc & 0xFF];
                    break;
                }

                c = (uc & ~CSET_MASK);
#ifdef PLATFORM_IS_UTF16
                if (uc > 0x10000 && uc < 0x110000) {
                    cbuf[0] = 0xD800 | ((uc - 0x10000) >> 10);
                    cbuf[1] = 0xDC00 | ((uc - 0x10000) & 0x3FF);
                    cbuf[2] = 0;
                } else
#endif
                {
                    cbuf[0] = uc;
                    cbuf[1] = 0;
                }

                if (DIRECT_FONT(uc)) {
                    if (c >= ' ' && c != 0x7F) {
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
                        char buf[2];
                        buffer_sink bs[1];
                        buffer_sink_init(bs, cbuf,
                                         sizeof(cbuf) - sizeof(wchar_t));
                        if (is_dbcs_leadbyte(term->ucsdata->font_codepage, (BYTE) c)) {
                            buf[0] = c;
                            buf[1] = (char) (0xFF & ldata->chars[top.x + 1].chr);
put_mb_to_wc(bs, term->ucsdata->font_codepage,
|
|
|
|
buf, 2);
|
2019-09-08 19:29:00 +00:00
|
|
|
top.x++;
|
|
|
|
} else {
|
|
|
|
buf[0] = c;
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
put_mb_to_wc(bs, term->ucsdata->font_codepage,
|
|
|
|
buf, 1);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
assert(!bs->overflowed);
|
|
|
|
*(wchar_t *)bs->out = L'\0';
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
}
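                /*
                 * Append whatever ended up in cbuf to the clipboard
                 * work buffer, then follow this cell's chain of
                 * combining characters: cc_next is a relative index to
                 * the next entry in the chain, and zero terminates it,
                 * so combining marks are copied along with their base
                 * cell.
                 */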
                for (p = cbuf; *p; p++)
                    clip_addchar(&buf, *p, attr, tc);

                if (ldata->chars[x].cc_next)
                    x += ldata->chars[x].cc_next;
                else
                    break;
            }
            top.x++;
        }
        if (nl) {
            int i;
            for (i = 0; i < sel_nl_sz; i++)
                clip_addchar(&buf, sel_nl[i], 0, term->basic_erase_char.truecolour);
        }
        top.y++;
        top.x = rect ? old_top_x : 0;

        unlineptr(ldata);
    }
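    /*
     * The selection is now complete in buf. On platforms that expect a
     * NUL-terminated selection, append a terminating cell; then hand
     * the buffers to each requested clipboard. CLIP_LOCAL takes
     * ownership of them for later re-announcement or local paste,
     * CLIP_NULL discards the data, and anything else goes to the front
     * end via win_clip_write.
     */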
#if SELECTION_NUL_TERMINATED
    clip_addchar(&buf, 0, 0, term->basic_erase_char.truecolour);
#endif
    /* Finally, transfer all that to the clipboard(s). */
    {
        int i;
        bool clip_local = false;
        for (i = 0; i < n_clipboards; i++) {
            if (clipboards[i] == CLIP_LOCAL) {
                clip_local = true;
            } else if (clipboards[i] != CLIP_NULL) {
                win_clip_write(
                    term->win, clipboards[i], buf.textbuf, buf.attrbuf,
                    buf.tcbuf, buf.bufpos, desel);
            }
        }
        if (clip_local) {
            sfree(term->last_selected_text);
            sfree(term->last_selected_attr);
            sfree(term->last_selected_tc);
            term->last_selected_text = buf.textbuf;
            term->last_selected_attr = buf.attrbuf;
            term->last_selected_tc = buf.tcbuf;
            term->last_selected_len = buf.bufpos;
        } else {
            sfree(buf.textbuf);
            sfree(buf.attrbuf);
            sfree(buf.tcbuf);
        }
    }
}

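/*
 * Copy the entire contents of the terminal: from the top of the
 * scrollback down to the last non-empty line of the visible screen.
 *
 * As a rough illustration (the clipboard ids a front end writes to
 * are platform-dependent; CLIP_SYSTEM here is the Windows-style id):
 *
 *     static const int clips[] = { CLIP_SYSTEM };
 *     term_copyall(term, clips, lenof(clips));
 */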
void term_copyall(Terminal *term, const int *clipboards, int n_clipboards)
{
    pos top;
    pos bottom;
    tree234 *screen = term->screen;
    top.y = -sblines(term);
    top.x = 0;
    bottom.y = find_last_nonempty_line(term, screen);
    bottom.x = term->cols;
    clipme(term, top, bottom, false, true, clipboards, n_clipboards);
}

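/*
 * Toplevel-callback handler for pasting from CLIP_LOCAL: it replays
 * the terminal's own most recent selection back into itself via
 * term_do_paste.
 */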
static void paste_from_clip_local(void *vterm)
{
    Terminal *term = (Terminal *)vterm;
    term_do_paste(term, term->last_selected_text, term->last_selected_len);
}

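/*
 * Re-announce the last selection to the given system clipboards
 * without recomputing it. CLIP_LOCAL is not a valid target here,
 * since the data is already stored locally, hence the assertion.
 */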
void term_request_copy(Terminal *term, const int *clipboards, int n_clipboards)
{
    int i;
    for (i = 0; i < n_clipboards; i++) {
        assert(clipboards[i] != CLIP_LOCAL);
        if (clipboards[i] != CLIP_NULL) {
            win_clip_write(term->win, clipboards[i],
                           term->last_selected_text, term->last_selected_attr,
                           term->last_selected_tc, term->last_selected_len,
                           false);
        }
    }
}

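/*
 * Ask for a paste from the given clipboard. CLIP_NULL never contains
 * data, CLIP_LOCAL is replayed from the stored selection via a
 * toplevel callback, and anything else is requested asynchronously
 * from the front end, which is expected to feed the data back in by
 * calling term_do_paste once it arrives.
 */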
void term_request_paste(Terminal *term, int clipboard)
{
    switch (clipboard) {
      case CLIP_NULL:
        /* Do nothing: CLIP_NULL never has data in it. */
        break;
      case CLIP_LOCAL:
        queue_toplevel_callback(paste_from_clip_local, term);
        break;
      default:
        win_clip_request_paste(term->win, clipboard);
        break;
    }
}

/*
 * The wordness array is mainly for deciding the disposition of the
 * US-ASCII characters.
 */
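/*
 * Broadly, the ctype values used here and in term->wordness are:
 * 0 for whitespace, 1 for punctuation and symbols, 2 for ordinary
 * word characters (also the default for anything unlisted), and 3
 * for CJK ideographs and syllabaries, which select as a class of
 * their own.
 */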
static int wordtype(Terminal *term, int uc)
{
    struct ucsword {
        int start, end, ctype;
    };
    static const struct ucsword ucs_words[] = {
        {128, 160, 0},
        {161, 191, 1},
        {215, 215, 1},
        {247, 247, 1},
        {0x037e, 0x037e, 1},            /* Greek question mark */
        {0x0387, 0x0387, 1},            /* Greek ano teleia */
        {0x055a, 0x055f, 1},            /* Armenian punctuation */
        {0x0589, 0x0589, 1},            /* Armenian full stop */
        {0x0700, 0x070d, 1},            /* Syriac punctuation */
        {0x104a, 0x104f, 1},            /* Myanmar punctuation */
        {0x10fb, 0x10fb, 1},            /* Georgian punctuation */
        {0x1361, 0x1368, 1},            /* Ethiopic punctuation */
        {0x166d, 0x166e, 1},            /* Canadian Syl. punctuation */
        {0x17d4, 0x17dc, 1},            /* Khmer punctuation */
        {0x1800, 0x180a, 1},            /* Mongolian punctuation */
        {0x2000, 0x200a, 0},            /* Various spaces */
        {0x2070, 0x207f, 2},            /* superscript */
        {0x2080, 0x208f, 2},            /* subscript */
        {0x200b, 0x27ff, 1},            /* punctuation and symbols */
        {0x3000, 0x3000, 0},            /* ideographic space */
        {0x3001, 0x3020, 1},            /* ideographic punctuation */
        {0x303f, 0x309f, 3},            /* Hiragana */
        {0x30a0, 0x30ff, 3},            /* Katakana */
        {0x3300, 0x9fff, 3},            /* CJK Ideographs */
        {0xac00, 0xd7a3, 3},            /* Hangul Syllables */
        {0xf900, 0xfaff, 3},            /* CJK Ideographs */
        {0xfe30, 0xfe6b, 1},            /* punctuation forms */
        {0xff00, 0xff0f, 1},            /* half/fullwidth ASCII */
        {0xff1a, 0xff20, 1},            /* half/fullwidth ASCII */
        {0xff3b, 0xff40, 1},            /* half/fullwidth ASCII */
        {0xff5b, 0xff64, 1},            /* half/fullwidth ASCII */
        {0xfff0, 0xffff, 0},            /* half/fullwidth ASCII */
        {0, 0, 0}
    };
    const struct ucsword *wptr;

    switch (uc & CSET_MASK) {
      case CSET_LINEDRW:
        uc = term->ucsdata->unitab_xterm[uc & 0xFF];
        break;
      case CSET_ASCII:
        uc = term->ucsdata->unitab_line[uc & 0xFF];
        break;
      case CSET_SCOACS:
        uc = term->ucsdata->unitab_scoacs[uc & 0xFF];
        break;
    }
    switch (uc & CSET_MASK) {
      case CSET_ACP:
        uc = term->ucsdata->unitab_font[uc & 0xFF];
        break;
      case CSET_OEMCP:
        uc = term->ucsdata->unitab_oemcp[uc & 0xFF];
        break;
    }

    /* For DBCS fonts I can't do anything useful. Even this will sometimes
     * fail as there's such a thing as a double width space. :-(
     */
    if (term->ucsdata->dbcs_screenfont &&
        term->ucsdata->font_codepage == term->ucsdata->line_codepage)
        return (uc != ' ');

    if (uc < 0x80)
        return term->wordness[uc];

    for (wptr = ucs_words; wptr->start; wptr++) {
        if (uc >= wptr->start && uc <= wptr->end)
            return wptr->ctype;
    }

    return 2;
}

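/*
 * For example, U+3001 (ideographic comma) falls in the 0x3001-0x3020
 * range above and is class 1, U+30A2 (Katakana A) is class 3, and a
 * character not listed at all, such as U+00E9, falls through to the
 * default class 2 and so selects as part of a word.
 */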
/*
 * Number of columns of this line that are available for ordinary
 * content: trusted lines reserve room for the trust sigil, and a line
 * marked LATTR_WRAPPED2 (wrapped because a double-width character
 * would not fit) loses its final cell.
 */
static int line_cols(Terminal *term, termline *ldata)
{
    int cols = term->cols;
    if (ldata->trusted) {
        cols -= TRUST_SIGIL_WIDTH;
    }
    if (ldata->lattr & LATTR_WRAPPED2)
        cols--;
    if (cols < 0)
        cols = 0;
    return cols;
}

/*
 * Spread the selection outwards according to the selection mode.
 */
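/*
 * p is one end of the selection and dir says which way to push it:
 * -1 moves the point back to the start of the unit (character run,
 * word or line) it falls inside, +1 moves it forward to the end.
 * sel_spread() uses -1 for the selection start and +1 for the end.
 */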
static pos sel_spread_half(Terminal *term, pos p, int dir)
{
    termline *ldata;
    short wvalue;
    int topy = -sblines(term);

    ldata = lineptr(p.y);

    switch (term->selmode) {
      case SM_CHAR:
        /*
         * In this mode, every character is a separate unit, except
         * for runs of spaces at the end of a non-wrapping line.
         */
        if (!(ldata->lattr & LATTR_WRAPPED)) {
            termchar *q = ldata->chars + line_cols(term, ldata);
            while (q > ldata->chars &&
                   IS_SPACE_CHR(q[-1].chr) && !q[-1].cc_next)
                q--;
            if (q == ldata->chars + term->cols)
                q--;
            if (p.x >= q - ldata->chars)
                p.x = (dir == -1 ? q - ldata->chars : term->cols - 1);
        }
        break;
      case SM_WORD:
        /*
         * In this mode, the units are maximal runs of characters
         * whose `wordness' has the same value.
         */
        wvalue = wordtype(term, UCSGET(ldata->chars, p.x));
        if (dir == +1) {
            while (1) {
                int maxcols = line_cols(term, ldata);
                if (p.x < maxcols-1) {
                    if (wordtype(term, UCSGET(ldata->chars, p.x+1)) == wvalue)
                        p.x++;
                    else
                        break;
                } else {
                    if (p.y+1 < term->rows &&
                        (ldata->lattr & LATTR_WRAPPED)) {
                        termline *ldata2;
                        ldata2 = lineptr(p.y+1);
                        if (wordtype(term, UCSGET(ldata2->chars, 0))
                            == wvalue) {
                            p.x = 0;
                            p.y++;
                            unlineptr(ldata);
                            ldata = ldata2;
                        } else {
                            unlineptr(ldata2);
                            break;
                        }
                    } else
                        break;
                }
            }
        } else {
            while (1) {
                if (p.x > 0) {
                    if (wordtype(term, UCSGET(ldata->chars, p.x-1)) == wvalue)
                        p.x--;
                    else
                        break;
                } else {
                    termline *ldata2;
                    int maxcols;
                    if (p.y <= topy)
                        break;
                    ldata2 = lineptr(p.y-1);
                    maxcols = line_cols(term, ldata2);
                    if (ldata2->lattr & LATTR_WRAPPED) {
                        if (wordtype(term, UCSGET(ldata2->chars, maxcols-1))
                            == wvalue) {
                            p.x = maxcols-1;
                            p.y--;
                            unlineptr(ldata);
                            ldata = ldata2;
                        } else {
                            unlineptr(ldata2);
                            break;
                        }
                    } else
                        break;
                }
            }
        }
        break;
      case SM_LINE:
        /*
         * In this mode, every line is a unit.
         */
        p.x = (dir == -1 ? 0 : term->cols - 1);
        break;
    }

    unlineptr(ldata);
    return p;
}

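/*
 * Apply sel_spread_half to both ends of the selection. Only
 * lexicographic (normal) selections are spread; rectangular ones are
 * left as they are. term->selend appears to be exclusive, hence the
 * decpos/incpos dance so that spreading is done on the last character
 * actually included.
 */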
static void sel_spread(Terminal *term)
{
    if (term->seltype == LEXICOGRAPHIC) {
        term->selstart = sel_spread_half(term, term->selstart, -1);
        decpos(term->selend);
        term->selend = sel_spread_half(term, term->selend, +1);
        incpos(term->selend);
    }
}

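/*
 * Pasting is driven from a toplevel callback, one chunk per callback:
 * each chunk runs up to and including the next CR (0x0D) in the paste
 * buffer. After sending a chunk to the line discipline the callback
 * re-queues itself, so the event loop gets a chance to run between
 * lines and the user can interrupt an over-long paste.
 */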
static void term_paste_callback(void *vterm)
{
    Terminal *term = (Terminal *)vterm;

    if (term->paste_len == 0)
        return;

    while (term->paste_pos < term->paste_len) {
        size_t n = 0;
        while (n + term->paste_pos < term->paste_len) {
            if (term->paste_buffer[term->paste_pos + n++] == '\015')
                break;
        }
        if (term->ldisc) {
            strbuf *buf = term_input_data_from_unicode(
                term, term->paste_buffer + term->paste_pos, n);
            term_keyinput_internal(term, buf->s, buf->len, false);
            strbuf_free(buf);
        }
        term->paste_pos += n;

        if (term->paste_pos < term->paste_len) {
            queue_toplevel_callback(term_paste_callback, term);
            return;
        }
    }
    term_bracketed_paste_stop(term);
    sfree(term->paste_buffer);
    term->paste_buffer = NULL;
    term->paste_len = 0;
}

/*
 * Specialist string compare function. Returns true if the buffer of
 * alen wide characters starting at a has as a prefix the buffer of
 * blen characters starting at b.
 */
static bool wstartswith(const wchar_t *a, size_t alen,
                        const wchar_t *b, size_t blen)
{
    return alen >= blen && !wcsncmp(a, b, blen);
}

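/*
 * For example, wstartswith(L"hello", 5, L"he", 2) is true, while
 * wstartswith(L"h", 1, L"he", 2) is false because the prefix is
 * longer than the buffer being tested.
 */
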
void term_do_paste(Terminal *term, const wchar_t *data, size_t len)
{
    const wchar_t *p;
|
2018-11-02 19:23:19 +00:00
|
|
|
bool paste_controls = conf_get_bool(term->conf, CONF_paste_controls);
|
2002-10-20 13:23:30 +00:00
|
|
|
|
Stop front ends remembering the data of their last paste.
Previously, both the Unix and Windows front ends would respond to a
paste action by retrieving data from the system clipboard, converting
it appropriately, _storing_ it in a persistent dynamic data block
inside the front end, and then calling term_do_paste(term), which in
turn would call back to the front end via get_clip() to retrieve the
current contents of that stored data block.
But, as far as I can tell, this was a completely pointless mechanism,
because after a data block was written into this storage area, it
would be immediately used for exactly one paste, and then never
accessed again until the next paste action caused it to be freed and
replaced with a new chunk of pasted data.
So why on earth was it stored persistently at all, and why that
callback mechanism from frontend to terminal back to frontend to
retrieve it for the actual paste action? I have no idea. This change
removes the entire system and replaces it with the completely obvious
alternative: the character-set-converted version of paste data is
allocated in a _local_ variable in the frontend paste functions,
passed directly to term_do_paste which now takes (buffer,length)
parameters, and freed immediately afterwards. get_clip() is gone.
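A minimal sketch of the calling convention this change establishes (the
clipboard helper is hypothetical; only term_do_paste's (buffer, length)
signature is taken from the code below):

wchar_t *frontend_get_clipboard(size_t *len);  /* hypothetical helper: read and
                                                * convert the system clipboard */

static void frontend_do_paste(Terminal *term)
{
    size_t len;
    wchar_t *wdata = frontend_get_clipboard(&len);
    if (!wdata)
        return;
    term_do_paste(term, wdata, len);   /* converted data passed directly... */
    sfree(wdata);                      /* ...and freed immediately afterwards */
}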
2017-12-09 08:41:03 +00:00
|
|
|
/*
|
|
|
|
* Pasting data into the terminal counts as a keyboard event (for
|
|
|
|
* purposes of the 'Reset scrollback on keypress' config option),
|
|
|
|
* unless the paste is zero-length.
|
|
|
|
*/
|
|
|
|
if (len == 0)
|
|
|
|
return;
|
|
|
|
term_seen_key_event(term);
|
|
|
|
|
|
|
|
if (term->paste_buffer)
|
|
|
|
sfree(term->paste_buffer);
|
|
|
|
term->paste_pos = term->paste_len = 0;
|
|
|
|
term->paste_buffer = snewn(len + 12, wchar_t);
|
|
|
|
|
2024-08-10 11:11:28 +00:00
|
|
|
if (term->bracketed_paste && !term->no_bracketed_paste)
|
2019-06-17 19:21:06 +00:00
|
|
|
term_bracketed_paste_start(term);
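/*
 * Background, assumed from xterm's DECSET 2004 behaviour rather than
 * stated here: term_bracketed_paste_start() announces the paste to the
 * application with ESC [ 200 ~ and term_bracketed_paste_stop() ends it
 * with ESC [ 201 ~, which is why the loop below strips any literal
 * ESC [ 201 ~ found inside the pasted text itself.
 */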
|
2001-05-17 10:06:54 +00:00
|
|
|
|
2018-03-11 17:40:42 +00:00
|
|
|
p = data;
|
2017-12-09 08:41:03 +00:00
|
|
|
while (p < data + len) {
|
2018-03-11 17:40:42 +00:00
|
|
|
wchar_t wc = *p++;
|
|
|
|
|
|
|
|
if (wc == sel_nl[0] &&
|
|
|
|
wstartswith(p-1, data+len-(p-1), sel_nl, sel_nl_sz)) {
|
|
|
|
/*
|
|
|
|
* This is the (platform-dependent) sequence that the host
|
|
|
|
* OS uses to represent newlines in clipboard data.
|
|
|
|
* Normalise it to a press of CR.
|
|
|
|
*/
|
|
|
|
p += sel_nl_sz - 1;
|
|
|
|
wc = '\015';
|
2001-05-17 10:06:54 +00:00
|
|
|
}
|
|
|
|
|
2018-03-11 17:40:42 +00:00
|
|
|
if ((wc & ~(wint_t)0x9F) == 0) {
|
|
|
|
/*
|
|
|
|
* This is a control code, either in the range 0x00-0x1F
|
|
|
|
* or 0x80-0x9F. Unless paste_controls mode is enabled, we reject all
|
|
|
|
* of these except for a small set of permitted ones.
|
|
|
|
*/
|
|
|
|
if (!paste_controls) {
|
|
|
|
/* In line with xterm 292, accepted control chars are:
|
|
|
|
* CR, LF, tab, backspace. (And DEL, i.e. 0x7F, but
|
|
|
|
* that's permitted by virtue of not matching the bit
|
|
|
|
* mask that got us into this if statement, so we
|
|
|
|
* don't have to permit it here.) */
|
|
|
|
static const unsigned mask =
|
|
|
|
(1<<13) | (1<<10) | (1<<9) | (1<<8);
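/* Background: bit n of 'mask' stands for character code n, so the
 * '(mask >> wc) & 1' test below passes only for CR (13), LF (10),
 * tab (9) and backspace (8). */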
|
|
|
|
|
|
|
|
if (wc > 15 || !((mask >> wc) & 1))
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (wc == '\033' && term->bracketed_paste &&
|
|
|
|
wstartswith(p-1, data+len-(p-1), L"\033[201~", 6)) {
|
|
|
|
/*
|
|
|
|
* Also, in bracketed-paste mode, reject the ESC
|
|
|
|
* character that begins the end-of-paste sequence.
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
2012-02-19 10:27:18 +00:00
|
|
|
}
|
2018-03-11 17:40:42 +00:00
|
|
|
|
|
|
|
term->paste_buffer[term->paste_len++] = wc;
|
2017-12-09 08:41:03 +00:00
|
|
|
}
|
2012-02-19 10:27:18 +00:00
|
|
|
|
2017-12-09 08:41:03 +00:00
|
|
|
/* Assume a small paste will be OK in one go. */
|
|
|
|
if (term->paste_len < 256) {
|
Refactor terminal input to remove ldiscucs.c.
The functions that previously lived in it now live in terminal.c
itself; they've been renamed term_keyinput and term_keyinputw, and
their function is to add data to the terminal's user input buffer from
a char or wchar_t string respectively.
They sit more comfortably in terminal.c anyway, because their whole
point is to translate into the character encoding that the terminal is
currently configured to use. Also, making them part of the terminal
code means they can also take care of calling term_seen_key_event(),
which simplifies most of the call sites in the GTK and Windows front
ends.
Generation of text _inside_ terminal.c, from responses to query escape
sequences, is therefore not done by calling those external entry
points: we send those responses directly to the ldisc, so that they
don't count as keypresses for all the user-facing purposes like bell
overload handling and scrollback reset. To make _that_ convenient,
I've arranged that most of the code that previously lived in
lpage_send and luni_send is now in separate translation functions, so
those can still be called from situations where you're not going to do
the default thing with the translated data.
(However, pasted data _does_ still count as close enough to a keypress
to call term_seen_key_event - but it clears the 'interactive' flag
when the data is passed on to the line discipline, which tweaks a
minor detail of control-char handling in line ending mode but mostly
just means pastes aren't interrupted.)
2019-06-17 19:13:55 +00:00
|
|
|
if (term->ldisc) {
|
|
|
|
strbuf *buf = term_input_data_from_unicode(
|
|
|
|
term, term->paste_buffer, term->paste_len);
|
2024-09-24 08:37:36 +00:00
|
|
|
assert(buf->len <= INT_MAX); /* because paste_len was also small */
|
2019-06-17 19:13:55 +00:00
|
|
|
term_keyinput_internal(term, buf->s, buf->len, false);
|
|
|
|
strbuf_free(buf);
|
|
|
|
}
|
2017-12-09 08:41:03 +00:00
|
|
|
if (term->paste_buffer)
|
|
|
|
sfree(term->paste_buffer);
|
2019-06-17 19:21:06 +00:00
|
|
|
term_bracketed_paste_stop(term);
|
|
|
|
term->paste_buffer = NULL;
|
2017-12-09 08:41:03 +00:00
|
|
|
term->paste_pos = term->paste_len = 0;
|
2001-05-17 10:06:54 +00:00
|
|
|
}
|
2013-08-17 16:06:12 +00:00
|
|
|
|
Remove the timed part of the terminal paste mechanism.
In r10020 I carefully reimplemented using timing.c and callback.c the
same policy for large pastes that the previous code appeared to be
implementing ad-hoc, which included a 450ms delay between sending
successive lines of pasted text if no visible acknowledgment of the
just-sent line (in the form of a \n or \r) came back from the
application.
However, it turns out that that *wasn't* what the old code was doing.
It *would* have done that, but for the bug that it never actually set
the 'last_paste' variable, and never has done since it was first
introduced way back in r516! So the policy I thought had been in force
forever has in fact only been in force since I unwittingly fixed that
bug in r10020 - and it turns out to be a bad idea, breaking pastes
into vi in particular.
So I've removed the timed paste code completely, on the basis that
it's never actually worked and nobody seems to have been unhappy about
that. Now we still break large pastes into separate lines and send
them in successive top-level callbacks, and the user can still press a
key to interrupt a paste if they manage to catch it still going on,
but there's no attempted *delay* any more.
(It's possible that what I *really* ought to be doing is calling
back->sendbuffer() to see whether the backend is consuming the data
pasted so far, and if not, deferring the rest of the paste until the
send buffer becomes smaller. Then we could have pasting be delayed by
back-pressure from the recipient, and still manually interruptible
during that delay, but not have it delayed by anything else. But what
we have here should at least manage to be equivalent to the *actual*
rather than the intended old policy.)
[originally from svn r10041]
[r516 == 0d5d39064a0d078af47e3158313dd2b82bfd167c]
[r10020 == 7be9af74ec8b97f948d6b3d67ebaf1a97138da33]
2013-09-15 14:05:38 +00:00
|
|
|
queue_toplevel_callback(term_paste_callback, term);
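/* Background, from the commit message above: any paste data still held in
 * term->paste_buffer is drained by term_paste_callback in successive
 * top-level callbacks, so a long paste remains interruptible by a keypress
 * instead of blocking the terminal. */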
|
2001-05-17 10:06:54 +00:00
|
|
|
}
|
|
|
|
|
2003-01-25 16:16:45 +00:00
|
|
|
void term_mouse(Terminal *term, Mouse_Button braw, Mouse_Button bcooked,
|
2019-09-08 19:29:00 +00:00
|
|
|
Mouse_Action a, int x, int y, bool shift, bool ctrl, bool alt)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2001-04-16 21:25:13 +00:00
|
|
|
pos selpoint;
|
Re-engineering of terminal emulator, phase 1.
The active terminal screen is no longer an array of `unsigned long'
encoding 16-bit Unicode plus 16 attribute bits. Now it's an array of
`termchar' structures, which currently have 32-bit Unicode and 32
attribute bits but which will probably expand further in future.
To prevent bloat of the memory footprint, I've introduced a mostly
RLE-like compression scheme for storing scrollback: each line is
compressed into a compact (but hard to modify) form when it moves
into the term->scrollback tree, and is temporarily decompressed when
the user wants to scroll back over it. My initial tests suggest that
this compression averages about 1/4 of the previous (32 bits per
character cell) data size in typical output, which means this is an
improvement even without counting the new ability to extend the
information stored in each character cell.
Another beneficial side effect is that the insane format in which
Unicode was passed to front ends through do_text() has now been
rendered sane.
Testing is incomplete; this _may_ still have instabilities. Windows
and Unix front ends both seem to work as far as I've looked, but I
haven't yet looked very hard. The Mac front end I've edited (it
seemed obvious how to change it) but I can't compile or test it.
As an immediate functional effect, the terminal emulator now
supports full 32-bit Unicode to whatever extent the host platform
allows it to. For example, if you output a 4-or-more-byte UTF-8
character in Unix pterm, it will not display it properly, but it
will correctly paste it back out in a UTF8_STRING selection. Windows
is more restricted, sadly.
[originally from svn r4609]
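For orientation, the cell format the message describes amounts to something
like this (an illustrative shape only; the real 'termchar' in terminal.h is
more elaborate):

typedef struct illustrative_cell {
    unsigned long chr;    /* full 32-bit Unicode code point */
    unsigned long attr;   /* 32 attribute bits: colours, bold, underline, ... */
} illustrative_cell;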
2004-10-13 11:50:16 +00:00
|
|
|
termline *ldata;
|
2018-11-02 19:23:19 +00:00
|
|
|
bool raw_mouse = (term->xterm_mouse &&
|
|
|
|
!term->no_mouse_rep &&
|
|
|
|
!(term->mouse_override && shift));
|
2001-10-31 18:50:09 +00:00
|
|
|
int default_seltype;
|
2001-05-06 14:35:20 +00:00
|
|
|
|
2019-12-20 13:56:58 +00:00
|
|
|
// Don't do anything if mouse movement events weren't requested;
|
|
|
|
// Note: return early to avoid doing all of this code on every mouse move
|
|
|
|
// event only to throw it away.
|
|
|
|
if (a == MA_MOVE && (!raw_mouse || term->xterm_mouse < 3)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2001-09-08 12:37:48 +00:00
|
|
|
if (y < 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
y = 0;
|
|
|
|
if (a == MA_DRAG && !raw_mouse)
|
|
|
|
term_scroll(term, 0, -1);
|
2001-09-08 12:37:48 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
if (y >= term->rows) {
|
2019-09-08 19:29:00 +00:00
|
|
|
y = term->rows - 1;
|
|
|
|
if (a == MA_DRAG && !raw_mouse)
|
|
|
|
term_scroll(term, 0, +1);
|
2001-09-08 12:37:48 +00:00
|
|
|
}
|
2001-05-06 14:35:20 +00:00
|
|
|
if (x < 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
if (y > 0 && !raw_mouse && term->seltype != RECTANGULAR) {
|
2017-10-01 20:53:32 +00:00
|
|
|
/*
|
|
|
|
* When we're using the mouse for normal raster-based
|
|
|
|
* selection, dragging off the left edge of a terminal row
|
|
|
|
* is treated the same as the right-hand end of the
|
|
|
|
* previous row, in that it's considered to identify a
|
|
|
|
* point _before_ the first character on row y.
|
|
|
|
*
|
|
|
|
* But if the mouse action is going to be used for
|
|
|
|
* anything else - rectangular selection, or xterm mouse
|
|
|
|
* tracking - then we disable this special treatment.
|
|
|
|
*/
|
2019-09-08 19:29:00 +00:00
|
|
|
x = term->cols - 1;
|
|
|
|
y--;
|
|
|
|
} else
|
|
|
|
x = 0;
|
1999-01-22 09:36:21 +00:00
|
|
|
}
|
2002-10-22 16:11:33 +00:00
|
|
|
if (x >= term->cols)
|
2019-09-08 19:29:00 +00:00
|
|
|
x = term->cols - 1;
|
1999-01-08 13:10:19 +00:00
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
selpoint.y = y + term->disptop;
|
2001-04-16 21:25:13 +00:00
|
|
|
ldata = lineptr(selpoint.y);
|
2004-11-28 09:24:57 +00:00
|
|
|
|
2004-10-13 11:50:16 +00:00
|
|
|
if ((ldata->lattr & LATTR_MODE) != LATTR_NORM)
|
2019-09-08 19:29:00 +00:00
|
|
|
x /= 2;
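/* Background: on a double-width (or double-height) line each character
 * occupies two screen cells, so the physical column is halved to index
 * the underlying character. */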
|
2004-11-28 09:24:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Transform x through the bidi algorithm to find the _logical_
|
|
|
|
* click point from the physical one.
|
|
|
|
*/
|
|
|
|
if (term_bidi_line(term, ldata, y) != NULL) {
|
2019-09-08 19:29:00 +00:00
|
|
|
x = term->post_bidi_cache[y].backward[x];
|
2004-11-28 09:24:57 +00:00
|
|
|
}
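/* Background: the 'backward' table maps a display column to the
 * corresponding logical column, so e.g. a click on the visually leftmost
 * cell of a right-to-left run resolves to the logically last character
 * of that run. */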
|
|
|
|
|
|
|
|
selpoint.x = x;
|
2004-10-13 11:50:16 +00:00
|
|
|
unlineptr(ldata);
|
1999-01-08 13:02:13 +00:00
|
|
|
|
2006-06-11 12:56:52 +00:00
|
|
|
/*
|
|
|
|
* If we're in the middle of a selection operation, we ignore raw
|
|
|
|
* mouse mode until it's done (we must have been not in raw mouse
|
|
|
|
* mode when it started).
|
|
|
|
* This makes use of Shift for selection reliable, and avoids the
|
|
|
|
* host seeing mouse releases for which they never saw corresponding
|
|
|
|
* presses.
|
|
|
|
*/
|
|
|
|
if (raw_mouse &&
|
2019-09-08 19:29:00 +00:00
|
|
|
(term->selstate != ABOUT_TO) && (term->selstate != DRAGGING)) {
|
|
|
|
int encstate = 0, r, c;
|
2018-11-02 19:23:19 +00:00
|
|
|
bool wheel;
|
2024-09-08 16:20:16 +00:00
|
|
|
char *response = NULL;
|
2001-05-06 14:20:41 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->ldisc) {
|
2002-10-26 10:16:19 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
switch (braw) {
|
|
|
|
case MBT_LEFT:
|
|
|
|
encstate = 0x00; /* left button down */
|
2018-10-29 19:50:29 +00:00
|
|
|
wheel = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case MBT_MIDDLE:
|
|
|
|
encstate = 0x01;
|
2018-10-29 19:50:29 +00:00
|
|
|
wheel = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case MBT_RIGHT:
|
|
|
|
encstate = 0x02;
|
2018-10-29 19:50:29 +00:00
|
|
|
wheel = false;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case MBT_WHEEL_UP:
|
|
|
|
encstate = 0x40;
|
2018-10-29 19:50:29 +00:00
|
|
|
wheel = true;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case MBT_WHEEL_DOWN:
|
|
|
|
encstate = 0x41;
|
2018-10-29 19:50:29 +00:00
|
|
|
wheel = true;
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
2022-11-22 18:36:23 +00:00
|
|
|
case MBT_WHEEL_LEFT:
|
|
|
|
encstate = 0x42;
|
|
|
|
wheel = true;
|
|
|
|
break;
|
|
|
|
case MBT_WHEEL_RIGHT:
|
|
|
|
encstate = 0x43;
|
|
|
|
wheel = true;
|
|
|
|
break;
|
2019-12-20 13:56:58 +00:00
|
|
|
case MBT_NOTHING:
|
|
|
|
assert( a == MA_MOVE );
|
|
|
|
encstate = 0x03; // release; no buttons pressed
|
|
|
|
wheel = false;
|
|
|
|
break;
|
2019-09-08 19:29:00 +00:00
|
|
|
default:
|
2014-02-16 16:40:45 +00:00
|
|
|
return;
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
2014-02-16 16:40:45 +00:00
|
|
|
if (wheel) {
|
|
|
|
/* For mouse wheel buttons, we only ever expect to see
|
|
|
|
* MA_CLICK actions, and we don't try to keep track of
|
|
|
|
* the buttons being 'pressed' (since without matching
|
|
|
|
* click/release pairs that's pointless). */
|
|
|
|
if (a != MA_CLICK)
|
|
|
|
return;
|
|
|
|
} else switch (a) {
|
2019-09-08 19:29:00 +00:00
|
|
|
case MA_DRAG:
|
|
|
|
if (term->xterm_mouse == 1)
|
|
|
|
return;
|
2019-12-20 13:56:58 +00:00
|
|
|
encstate += 0x20; // motion indicator
|
|
|
|
break;
|
|
|
|
case MA_MOVE: // mouse move without buttons
|
|
|
|
assert( braw == MBT_NOTHING && bcooked == MBT_NOTHING );
|
|
|
|
if (term->xterm_mouse < 3)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (selpoint.x == term->raw_mouse_reported_x &&
|
|
|
|
selpoint.y == term->raw_mouse_reported_y)
|
|
|
|
return;
|
|
|
|
|
|
|
|
term->raw_mouse_reported_x = x;
|
|
|
|
term->raw_mouse_reported_y = y;
|
|
|
|
|
|
|
|
encstate += 0x20; // motion indicator
|
2019-09-08 19:29:00 +00:00
|
|
|
break;
|
|
|
|
case MA_RELEASE:
|
|
|
|
/* If multiple extensions are enabled, the xterm 1006 encoding is used, so it's okay to check for only that */
|
|
|
|
if (!term->xterm_extended_mouse)
|
|
|
|
encstate = 0x03;
|
|
|
|
term->mouse_is_down = 0;
|
|
|
|
break;
|
|
|
|
case MA_CLICK:
|
|
|
|
if (term->mouse_is_down == braw)
|
|
|
|
return;
|
|
|
|
term->mouse_is_down = braw;
|
|
|
|
break;
|
2014-02-16 16:40:45 +00:00
|
|
|
default:
|
|
|
|
return;
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
if (shift)
|
|
|
|
encstate += 0x04;
|
|
|
|
if (ctrl)
|
|
|
|
encstate += 0x10;
|
|
|
|
r = y + 1;
|
|
|
|
c = x + 1;
|
|
|
|
|
|
|
|
/* Check the extensions in decreasing order of preference. Encoding the release event above assumes that 1006 comes first. */
|
|
|
|
if (term->xterm_extended_mouse) {
|
2024-09-08 16:20:16 +00:00
|
|
|
response = dupprintf("\033[<%d;%d;%d%c", encstate, c, r,
|
|
|
|
a == MA_RELEASE ? 'm' : 'M');
|
2019-09-08 19:29:00 +00:00
|
|
|
} else if (term->urxvt_extended_mouse) {
|
2024-09-08 16:20:16 +00:00
|
|
|
response = dupprintf("\033[%d;%d;%dM", encstate + 32, c, r);
|
2019-09-08 19:29:00 +00:00
|
|
|
} else if (c <= 223 && r <= 223) {
|
2024-09-08 16:20:16 +00:00
|
|
|
response = dupprintf("\033[M%c%c%c", encstate + 32,
|
|
|
|
c + 32, r + 32);
|
|
|
|
}
|
|
|
|
if (response) {
|
|
|
|
ldisc_send(term->ldisc, response, strlen(response), false);
|
|
|
|
sfree(response);
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
2001-05-06 14:20:41 +00:00
|
|
|
}
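/*
 * Worked example of the three report encodings above (added illustration,
 * not from the source): a Ctrl + left-button press at terminal column 10,
 * row 5 gives encstate = 0x00 + 0x10 = 16, c = 10, r = 5, so:
 *   xterm 1006 (SGR): "\033[<16;10;5M"  (the matching release ends in 'm')
 *   urxvt 1015:       "\033[48;10;5M"   (encstate + 32)
 *   classic X10:      "\033[M0*%"       (bytes 48, 42, 37, i.e. each + 32)
 */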
|
|
|
|
|
2001-10-31 18:50:09 +00:00
|
|
|
/*
|
|
|
|
* Set the selection type (rectangular or normal) at the start
|
|
|
|
* of a selection attempt, from the state of Alt.
|
|
|
|
*/
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
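A minimal sketch of the resulting API shape (illustrative: conf_get_bool is
used elsewhere in this file and conf_copy is named above; conf_free is
assumed to be the matching destructor):

static void conf_usage_sketch(Terminal *term)
{
    Conf *snapshot = conf_copy(term->conf);   /* copy of every setting */
    bool controls = conf_get_bool(term->conf, CONF_paste_controls);
    (void)controls;
    conf_free(snapshot);                      /* assumed destructor */
}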
2011-07-14 18:52:21 +00:00
|
|
|
if (!alt ^ !term->rect_select)
|
2019-09-08 19:29:00 +00:00
|
|
|
default_seltype = RECTANGULAR;
|
2001-10-31 18:50:09 +00:00
|
|
|
else
|
2019-09-08 19:29:00 +00:00
|
|
|
default_seltype = LEXICOGRAPHIC;
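/* Background: '!alt ^ !term->rect_select' above is true when exactly one
 * of Alt-held and the configured rectangular-selection default is set, so
 * holding Alt inverts whichever selection shape is configured. */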
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->selstate == NO_SELECTION) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->seltype = default_seltype;
|
2001-10-31 18:50:09 +00:00
|
|
|
}
|
|
|
|
|
2003-01-25 16:16:45 +00:00
|
|
|
if (bcooked == MBT_SELECT && a == MA_CLICK) {
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
|
|
|
term->selstate = ABOUT_TO;
|
|
|
|
term->seltype = default_seltype;
|
|
|
|
term->selanchor = selpoint;
|
|
|
|
term->selmode = SM_CHAR;
|
2003-01-25 16:16:45 +00:00
|
|
|
} else if (bcooked == MBT_SELECT && (a == MA_2CLK || a == MA_3CLK)) {
|
2019-09-08 19:29:00 +00:00
|
|
|
deselect(term);
|
|
|
|
term->selmode = (a == MA_2CLK ? SM_WORD : SM_LINE);
|
|
|
|
term->selstate = DRAGGING;
|
|
|
|
term->selstart = term->selanchor = selpoint;
|
|
|
|
term->selend = term->selstart;
|
|
|
|
incpos(term->selend);
|
|
|
|
sel_spread(term);
|
2003-01-25 16:16:45 +00:00
|
|
|
} else if ((bcooked == MBT_SELECT && a == MA_DRAG) ||
|
2019-09-08 19:29:00 +00:00
|
|
|
(bcooked == MBT_EXTEND && a != MA_RELEASE)) {
|
2015-09-28 19:18:58 +00:00
|
|
|
if (a == MA_DRAG &&
|
|
|
|
(term->selstate == NO_SELECTION || term->selstate == SELECTED)) {
|
2015-09-03 18:20:28 +00:00
|
|
|
/*
|
|
|
|
* This can happen if a front end has passed us a MA_DRAG
|
|
|
|
* without a prior MA_CLICK. OS X GTK does so, for
|
|
|
|
* example, if the initial button press was eaten by the
|
|
|
|
* WM when it activated the window in the first place. The
|
|
|
|
* nicest thing to do in this situation is to ignore
|
|
|
|
* further drags, and wait for the user to click in the
|
|
|
|
* window again properly if they want to select.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
if (term->selstate == ABOUT_TO && poseq(term->selanchor, selpoint))
|
|
|
|
return;
|
|
|
|
if (bcooked == MBT_EXTEND && a != MA_DRAG &&
|
|
|
|
term->selstate == SELECTED) {
|
|
|
|
if (term->seltype == LEXICOGRAPHIC) {
|
|
|
|
/*
|
|
|
|
* For normal selection, we extend by moving
|
|
|
|
* whichever end of the current selection is closer
|
|
|
|
* to the mouse.
|
|
|
|
*/
|
|
|
|
if (posdiff(selpoint, term->selstart) <
|
|
|
|
posdiff(term->selend, term->selstart) / 2) {
|
|
|
|
term->selanchor = term->selend;
|
|
|
|
decpos(term->selanchor);
|
|
|
|
} else {
|
|
|
|
term->selanchor = term->selstart;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* For rectangular selection, we have a choice of
|
|
|
|
* _four_ places to put selanchor and selpoint: the
|
|
|
|
* four corners of the selection.
|
|
|
|
*/
|
|
|
|
if (2*selpoint.x < term->selstart.x + term->selend.x)
|
|
|
|
term->selanchor.x = term->selend.x-1;
|
|
|
|
else
|
|
|
|
term->selanchor.x = term->selstart.x;
|
|
|
|
|
|
|
|
if (2*selpoint.y < term->selstart.y + term->selend.y)
|
|
|
|
term->selanchor.y = term->selend.y;
|
|
|
|
else
|
|
|
|
term->selanchor.y = term->selstart.y;
|
|
|
|
}
|
|
|
|
term->selstate = DRAGGING;
|
|
|
|
}
|
|
|
|
if (term->selstate != ABOUT_TO && term->selstate != DRAGGING)
|
|
|
|
term->selanchor = selpoint;
|
|
|
|
term->selstate = DRAGGING;
|
|
|
|
if (term->seltype == LEXICOGRAPHIC) {
|
|
|
|
/*
|
|
|
|
* For normal selection, we set (selstart,selend) to
|
|
|
|
* (selpoint,selanchor) in some order.
|
|
|
|
*/
|
|
|
|
if (poslt(selpoint, term->selanchor)) {
|
|
|
|
term->selstart = selpoint;
|
|
|
|
term->selend = term->selanchor;
|
|
|
|
incpos(term->selend);
|
|
|
|
} else {
|
|
|
|
term->selstart = term->selanchor;
|
|
|
|
term->selend = selpoint;
|
|
|
|
incpos(term->selend);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* For rectangular selection, we may need to
|
|
|
|
* interchange x and y coordinates (if the user has
|
|
|
|
* dragged in the -x and +y directions, or vice versa).
|
|
|
|
*/
|
|
|
|
term->selstart.x = min(term->selanchor.x, selpoint.x);
|
|
|
|
term->selend.x = 1+max(term->selanchor.x, selpoint.x);
|
|
|
|
term->selstart.y = min(term->selanchor.y, selpoint.y);
|
|
|
|
term->selend.y = max(term->selanchor.y, selpoint.y);
|
|
|
|
}
|
|
|
|
sel_spread(term);
|
2003-01-25 16:16:45 +00:00
|
|
|
} else if ((bcooked == MBT_SELECT || bcooked == MBT_EXTEND) &&
|
2019-09-08 19:29:00 +00:00
|
|
|
a == MA_RELEASE) {
|
|
|
|
if (term->selstate == DRAGGING) {
|
|
|
|
/*
|
|
|
|
* We've completed a selection. We now transfer the
|
|
|
|
* data to the clipboard.
|
|
|
|
*/
|
|
|
|
clipme(term, term->selstart, term->selend,
|
|
|
|
(term->seltype == RECTANGULAR), false,
|
2017-12-10 15:45:45 +00:00
|
|
|
term->mouse_select_clipboards,
|
|
|
|
term->n_mouse_select_clipboards);
|
2019-09-08 19:29:00 +00:00
|
|
|
term->selstate = SELECTED;
|
|
|
|
} else
|
|
|
|
term->selstate = NO_SELECTION;
|
2003-01-25 16:16:45 +00:00
|
|
|
} else if (bcooked == MBT_PASTE
|
2019-09-08 19:29:00 +00:00
|
|
|
&& (a == MA_CLICK
|
2002-10-13 11:24:25 +00:00
|
|
|
#if MULTICLICK_ONLY_EVENT
|
2019-09-08 19:29:00 +00:00
|
|
|
|| a == MA_2CLK || a == MA_3CLK
|
2002-10-13 11:24:25 +00:00
|
|
|
#endif
|
2019-09-08 19:29:00 +00:00
|
|
|
)) {
|
|
|
|
term_request_paste(term, term->mouse_paste_clipboard);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
2013-03-10 11:04:07 +00:00
|
|
|
/*
|
|
|
|
* Since terminal output is suppressed during drag-selects, we
|
|
|
|
* should make sure to write any pending output if one has just
|
|
|
|
* finished.
|
|
|
|
*/
|
Proper buffer management between terminal and backend.
The return value of term_data() is used as the return value from the
GUI-terminal versions of the Seat output method, which means backends
will take it to be the amount of standard-output data currently
buffered, and exert back-pressure on the remote peer if it gets too
big (e.g. by ceasing to extend the window in that particular SSH-2
channel).
Historically, as a comment in term_data() explained, we always just
returned 0 from that function, on the basis that we were processing
all the terminal data through our terminal emulation code immediately,
and never retained any of it in the buffer at all. If the terminal
emulation code were to start running slowly, then it would slow down
the _whole_ PuTTY system, due to single-threadedness, and
back-pressure of a sort would be exerted on the remote by it simply
failing to get round to reading from the network socket. But by the
time we got back to the top level of term_data(), we'd have finished
reading all the data we had, so it was still appropriate to return 0.
That comment is still correct if you're thinking about the limiting
factor on terminal data processing being the CPU usage in term_out().
But now that's no longer the whole story, because sometimes we leave
data in term->inbuf without having processed it: during drag-selects
in the terminal window, and (just introduced) while waiting for the
response to a pending window resize request. For both those reasons,
we _don't_ always have a buffer size of zero when we return from
term_data().
So now that hole in our buffer size management is filled in:
term_data() returns the true size of the remaining unprocessed
terminal output, so that back-pressure will be exerted if the terminal
is currently not consuming it. And when processing resumes and we
start to clear our backlog, we call backend_unthrottle to let the
backend know it can relax the back-pressure if necessary.
2021-12-12 10:57:23 +00:00
|
|
|
term_out(term, false);
|
Fold ancillary window changes into main redraw.
This fixes a long-standing inconsistency in updates to the terminal
window: redrawing of actual text was deferred for 1/50 second, but all
the other kinds of change the terminal can make to the window
(position, size, z-order, title, mouse pointer shape, scrollbar...)
were enacted immediately. In particular, this could mean that two
updates requested by the terminal output stream happened in reverse
order.
Now they're all done as part of term_update, which should mean that
things requested in the same chunk of terminal input happen at the
same time, or at the very least, not in reverse order compared to the
order the requests came in.
Also, the same timer-based UPDATE_DELAY mechanism that applies to the
text updates now applies to all the other window modifications, which
should prevent any of those from being the limiting factor to how fast
this terminal implementation can process input data (which is exactly
why I set up that system for the main text update).
This makes everything happen with a bit more latency, but I'm about to
reverse that in a follow-up commit.
2021-02-07 19:59:21 +00:00
|
|
|
term_schedule_update(term);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
Cancel drag-select when the context menu pops up.
I got a pterm into a stuck state this morning by an accidental mouse
action. I'd intended to press Ctrl + right-click to pop up the context
menu, but I accidentally pressed down the left button first, starting
a selection drag, and then while the left button was still held down,
pressed down the right button as well, triggering the menu.
The effect was that the context menu appeared while term->selstate was
set to DRAGGING, in which state terminal output is suppressed, and
which is only unset by a mouse-button release event. But then that
release event went to the popup menu, and the terminal window never
got it. So the terminal stayed stuck forever - or rather, until I
guessed the cause and did another selection drag to reset it.
This happened to me on GTK, but once I knew how I'd done it, I found I
could reproduce the same misbehaviour on Windows by the same method.
Added a simplistic fix, on both platforms, that cancels a selection
drag if the popup menu is summoned part way through it.
2022-03-29 17:05:11 +00:00
|
|
|
void term_cancel_selection_drag(Terminal *term)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* In unusual circumstances, a mouse drag might be interrupted by
|
|
|
|
* something that steals the rest of the mouse gesture. An example
|
|
|
|
* is the GTK popup menu appearing. In that situation, we'll never
|
|
|
|
* receive the MA_RELEASE that finishes the DRAGGING state, which
|
|
|
|
* means terminal output could be suppressed indefinitely. Call
|
|
|
|
* this function from the front end in such situations to restore
|
|
|
|
* sensibleness.
|
|
|
|
*/
|
|
|
|
if (term->selstate == DRAGGING)
|
|
|
|
term->selstate = NO_SELECTION;
|
|
|
|
term_out(term, false);
|
|
|
|
term_schedule_update(term);
|
|
|
|
}
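/*
 * Illustrative sketch only, not part of any real front end: a
 * platform's context-menu handler would call this just before the
 * menu grabs the mouse, so that a half-finished drag cannot leave
 * terminal output suppressed. 'platform_show_context_menu' is a
 * hypothetical name standing in for the GTK or Windows menu call.
 */
#if 0
static void popup_menu_example(Terminal *term)
{
    /* The menu will swallow the eventual MA_RELEASE, so tidy up first. */
    term_cancel_selection_drag(term);
    platform_show_context_menu();      /* hypothetical platform call */
}
#endif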
|
|
|
|
|
2021-10-23 09:52:11 +00:00
|
|
|
static int shift_bitmap(bool shift, bool ctrl, bool alt, bool *consumed_alt)
|
New config option for shifted arrow key handling.
This commit introduces a new config option for how to handle shifted
arrow keys.
In the default mode (SHARROW_APPLICATION), we do what we've always
done: Ctrl flips the arrow keys between sending their most usual
escape sequences (ESC [ A ... ESC [ D) and sending the 'application
cursor keys' sequences (ESC O A ... ESC O D). Whichever of those modes
is currently configured, Ctrl+arrow sends the other one.
In the new mode (SHARROW_BITMAP), application cursor key mode is
unaffected by any shift keys, but the default sequences acquire two
numeric arguments. The first argument is 1 (reflecting the fact that a
shifted arrow key still notionally moves just 1 character cell); the
second is the bitmap (1 for Shift) + (2 for Alt) + (4 for Ctrl),
offset by 1. (Except that if _none_ of those modifiers is pressed,
both numeric arguments are simply omitted.)
The new bitmap mode is what current xterm generates, and also what
Windows ConPTY seems to expect. If you start an ordinary Command
Prompt and launch into WSL, those are the sequences it will generate
for shifted arrow keys; conversely, if you run a Command Prompt within
a ConPTY, then these sequences for Ctrl+arrow will have the effect you
expect in cmd.exe command-line editing (going backward or forward a
word). For that reason, I enable this mode unconditionally when
launching Windows pterm.
2021-10-18 19:00:25 +00:00
|
|
|
{
|
|
|
|
int bitmap = (shift ? 1 : 0) + (alt ? 2 : 0) + (ctrl ? 4 : 0);
|
|
|
|
if (bitmap)
|
|
|
|
bitmap++;
|
2021-10-23 09:52:11 +00:00
|
|
|
if (alt && consumed_alt)
|
|
|
|
*consumed_alt = true;
|
2021-10-18 19:00:25 +00:00
|
|
|
return bitmap;
|
|
|
|
}
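/*
 * Worked examples (not compiled in): the returned parameter follows
 * xterm's convention that (parameter - 1) is the modifier bitmap
 * Shift=1, Alt=2, Ctrl=4, with 0 meaning 'omit the parameter'.
 */
#if 0
static void shift_bitmap_examples(void)
{
    bool dummy = false;
    assert(shift_bitmap(false, false, false, &dummy) == 0); /* no modifier */
    assert(shift_bitmap(true,  false, false, &dummy) == 2); /* Shift */
    assert(shift_bitmap(false, false, true,  &dummy) == 3); /* Alt */
    assert(shift_bitmap(false, true,  false, &dummy) == 5); /* Ctrl */
    assert(shift_bitmap(true,  true,  true,  &dummy) == 8); /* all three */
}
#endif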
|
|
|
|
|
|
|
|
int format_arrow_key(char *buf, Terminal *term, int xkey,
|
2021-10-23 09:52:11 +00:00
|
|
|
bool shift, bool ctrl, bool alt, bool *consumed_alt)
|
2010-03-06 15:50:26 +00:00
|
|
|
{
|
|
|
|
char *p = buf;
|
|
|
|
|
|
|
|
if (term->vt52_mode)
|
2019-09-08 19:29:00 +00:00
|
|
|
p += sprintf(p, "\x1B%c", xkey);
|
2010-03-06 15:50:26 +00:00
|
|
|
else {
|
2019-09-08 19:29:00 +00:00
|
|
|
bool app_flg = (term->app_cursor_keys && !term->no_applic_c);
|
2010-03-06 15:50:26 +00:00
|
|
|
#if 0
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* RDB: VT100 & VT102 manuals both state the app cursor
|
|
|
|
* keys only work if the app keypad is on.
|
|
|
|
*
|
|
|
|
* SGT: That may well be true, but xterm disagrees and so
|
|
|
|
* does at least one application, so I've #if'ed this out
|
|
|
|
* and the behaviour is back to PuTTY's original: app
|
|
|
|
* cursor and app keypad are independently switchable
|
|
|
|
* modes. If anyone complains about _this_ I'll have to
|
|
|
|
* put in a configurable option.
|
|
|
|
*/
|
|
|
|
if (!term->app_keypad_keys)
|
|
|
|
app_flg = 0;
|
2010-03-06 15:50:26 +00:00
|
|
|
#endif
|
2021-10-18 19:00:25 +00:00
|
|
|
|
|
|
|
int bitmap = 0;
|
|
|
|
|
|
|
|
/* Adjustment based on Shift, Ctrl and/or Alt */
|
|
|
|
switch (term->sharrow_type) {
|
|
|
|
case SHARROW_APPLICATION:
|
|
|
|
if (ctrl)
|
|
|
|
app_flg = !app_flg;
|
|
|
|
break;
|
|
|
|
case SHARROW_BITMAP:
|
2021-10-23 09:52:11 +00:00
|
|
|
bitmap = shift_bitmap(shift, ctrl, alt, consumed_alt);
|
2021-10-18 19:00:25 +00:00
|
|
|
break;
|
|
|
|
}
|
2010-03-06 15:50:26 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
if (app_flg)
|
|
|
|
p += sprintf(p, "\x1BO%c", xkey);
|
2021-10-18 19:00:25 +00:00
|
|
|
else if (bitmap)
|
|
|
|
p += sprintf(p, "\x1B[1;%d%c", bitmap, xkey);
|
2019-09-08 19:29:00 +00:00
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1B[%c", xkey);
|
Centralise key escape sequences into terminal.c.
A long time ago, in commit 4d77b6567, I moved the generation of the
arrow-key escape sequences into a function format_arrow_key(). Mostly
the reason for that was a special purpose I had in mind at the time
which involved auto-generating the same sequences in response to
things other than a keypress, but I always thought it would be nice to
centralise a lot more of PuTTY's complicated keyboard handling in the
same way - at least the handling of the function keys and their
numerous static and dynamic config options.
In this year's general spirit of tidying up and refactoring, I think
it's finally time. So here I introduce three more centralised
functions for dealing with the numbered function keys, the small
keypad (Ins, Home, PgUp etc) and the numeric keypad. Lots of horrible
and duplicated code from the key handling functions in window.c and
gtkwin.c is now more sensibly centralised: each platform keyboard
handler concerns itself with the local format of a keyboard event and
platform-specific enumeration of key codes, and once it's decided what
the logical key press actually _is_, it hands off to the new functions
in terminal.c to generate the appropriate escape code.
Mostly this is intended to be a refactoring without functional change,
leaving the keyboard handling how it's always been. But in cases where
the Windows and GTK handlers were accidentally inconsistent, I've
fixed the inconsistency rather than carefully keeping both sides how
they were. Known consistency fixes:
- swapping the arrow keys between normal (ESC [ A) and application
(ESC O A) is now done by pressing Ctrl with them, and _not_ by
pressing Shift. That was how it was always supposed to work, and
how it's worked on GTK all along, but on Windows it's been done by
Shift as well since 2010, due to a bug at the call site of
format_arrow_key() introduced when I originally wrote that function.
- in Xterm function key mode plus application keypad mode, the /*-
keys on the numeric keypad now send ESC O {o,j,m} in place of ESC O
{Q,R,S}. That's how the Windows keyboard handler has worked all
along (it was a deliberate behaviour tweak for the Xterm-like
function key mode, because in that mode ESC O {Q,R,S} are generated
by F2-F4). But the GTK keyboard handler omitted that particular
special case and was still sending ESC O {Q,R,S} for those keys in
all application keypad modes.
- also in Xterm function key mode plus app keypad mode, we only
generate the app-keypad escape sequences if Num Lock is on; with
Num Lock off, the numeric keypad becomes arrow keys and
Home/End/etc, just as it would in non-app-keypad mode. Windows has
done this all along, but again, GTK lacked that special case.
2018-12-08 08:25:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return p - buf;
|
|
|
|
}
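/*
 * Usage sketch (not compiled in): callers pass a small caller-owned
 * buffer and one of 'A'..'D' for Up/Down/Right/Left. An unmodified Up
 * arrow gives ESC [ A, application cursor mode gives ESC O A, and in
 * SHARROW_BITMAP mode Ctrl+Up gives ESC [ 1 ; 5 A. The buffer size
 * below is an illustrative assumption, not taken from a real caller.
 */
#if 0
static void arrow_key_example(Terminal *term)
{
    char buf[16];
    bool consumed_alt = false;
    int len = format_arrow_key(buf, term, 'A', /* Up */
                               false /* shift */, true /* ctrl */,
                               false /* alt */, &consumed_alt);
    /* The caller then sends 'len' bytes of 'buf' to the session. */
    (void)len;
}
#endif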
|
|
|
|
|
|
|
|
int format_function_key(char *buf, Terminal *term, int key_number,
|
2021-10-23 10:04:53 +00:00
|
|
|
bool shift, bool ctrl, bool alt, bool *consumed_alt)
|
2018-12-08 08:25:32 +00:00
|
|
|
{
|
|
|
|
char *p = buf;
|
|
|
|
|
|
|
|
static const int key_number_to_tilde_code[] = {
|
|
|
|
-1, /* no such key as F0 */
|
|
|
|
11, 12, 13, 14, 15, /*gap*/ 17, 18, 19, 20, 21, /*gap*/
|
|
|
|
23, 24, 25, 26, /*gap*/ 28, 29, /*gap*/ 31, 32, 33, 34,
|
|
|
|
};
|
|
|
|
|
|
|
|
assert(key_number > 0);
|
|
|
|
assert(key_number < lenof(key_number_to_tilde_code));
|
|
|
|
|
2021-10-23 10:04:53 +00:00
|
|
|
int index = key_number;
|
2022-02-10 18:51:19 +00:00
|
|
|
if (term->funky_type != FUNKY_XTERM_216 && term->funky_type != FUNKY_SCO) {
|
2021-10-23 10:04:53 +00:00
|
|
|
if (shift && index <= 10) {
|
|
|
|
shift = false;
|
|
|
|
index += 10;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-08 08:25:32 +00:00
|
|
|
int code = key_number_to_tilde_code[index];
|
|
|
|
|
|
|
|
if (term->funky_type == FUNKY_SCO) {
|
|
|
|
/* SCO function keys */
|
|
|
|
static const char sco_codes[] =
|
|
|
|
"MNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz@[\\]^_`{";
|
|
|
|
index = (key_number >= 1 && key_number <= 12) ? key_number - 1 : 0;
|
|
|
|
if (shift) index += 12;
|
|
|
|
if (ctrl) index += 24;
|
|
|
|
p += sprintf(p, "\x1B[%c", sco_codes[index]);
|
|
|
|
} else if ((term->vt52_mode || term->funky_type == FUNKY_VT100P) &&
|
|
|
|
code >= 11 && code <= 24) {
|
|
|
|
int offt = 0;
|
|
|
|
if (code > 15)
|
|
|
|
offt++;
|
|
|
|
if (code > 21)
|
|
|
|
offt++;
|
|
|
|
if (term->vt52_mode)
|
|
|
|
p += sprintf(p, "\x1B%c", code + 'P' - 11 - offt);
|
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1BO%c", code + 'P' - 11 - offt);
|
|
|
|
} else if (term->funky_type == FUNKY_LINUX && code >= 11 && code <= 15) {
|
|
|
|
p += sprintf(p, "\x1B[[%c", code + 'A' - 11);
|
2021-10-23 10:04:53 +00:00
|
|
|
} else if ((term->funky_type == FUNKY_XTERM ||
|
|
|
|
term->funky_type == FUNKY_XTERM_216) &&
|
|
|
|
code >= 11 && code <= 14) {
|
2018-12-08 08:25:32 +00:00
|
|
|
if (term->vt52_mode)
|
|
|
|
p += sprintf(p, "\x1B%c", code + 'P' - 11);
|
2021-10-23 10:04:53 +00:00
|
|
|
else {
|
|
|
|
int bitmap = 0;
|
|
|
|
if (term->funky_type == FUNKY_XTERM_216)
|
|
|
|
bitmap = shift_bitmap(shift, ctrl, alt, consumed_alt);
|
|
|
|
if (bitmap)
|
|
|
|
p += sprintf(p, "\x1B[1;%d%c", bitmap, code + 'P' - 11);
|
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1BO%c", code + 'P' - 11);
|
|
|
|
}
|
2018-12-08 08:25:32 +00:00
|
|
|
} else {
|
2021-10-23 10:04:53 +00:00
|
|
|
int bitmap = 0;
|
|
|
|
if (term->funky_type == FUNKY_XTERM_216)
|
|
|
|
bitmap = shift_bitmap(shift, ctrl, alt, consumed_alt);
|
|
|
|
if (bitmap)
|
|
|
|
p += sprintf(p, "\x1B[%d;%d~", code, bitmap);
|
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1B[%d~", code);
|
2018-12-08 08:25:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return p - buf;
|
|
|
|
}
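/*
 * Usage sketch (not compiled in). For example, with the default
 * ESC [ n ~ encoding F1 comes out as ESC [ 11 ~; FUNKY_LINUX gives
 * ESC [ [ A; FUNKY_XTERM gives ESC O P; FUNKY_XTERM_216 with Shift
 * held gives ESC [ 1 ; 2 P; and in the traditional modes Shift+F1 is
 * remapped to F11 (ESC [ 23 ~). Buffer size is an illustrative
 * assumption.
 */
#if 0
static void function_key_example(Terminal *term)
{
    char buf[32];
    bool consumed_alt = false;
    int len = format_function_key(buf, term, 1 /* F1 */,
                                  true /* shift */, false /* ctrl */,
                                  false /* alt */, &consumed_alt);
    (void)len;
}
#endif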
|
|
|
|
|
Send xterm 216+ modifiers in small-keypad key escape sequences.
In the 'xterm 216+' function key mode, a function key pressed with a
combination of Shift, Ctrl and Alt has its usual sequence like
ESC[n~ (for some integer n) turned into ESC[n;m~ where m-1 is a 3-bit
bitmap of currently pressed modifier keys.
This mode now also applies to the keys on the small keypad above the
arrow keys (Ins, Home, PgUp etc). If xterm 216+ mode is selected,
those keys are modified in the same way as the function keys.
As with the function keys, this doesn't guarantee that PuTTY will
_receive_ any particular shifted key of this kind, and not repurpose
it. Just as Alt+F4 still closes the window (at least on Windows)
rather than sending a modified F4 sequence, Shift+Ins will still
perform a paste action rather than sending a modified Ins sequence,
Shift+PgUp will still scroll the scrollback, etc. But the keys not
already used by PuTTY for other purposes should now have their
modern-xterm behaviour in modern-xterm mode.
Thanks to H.Merijn Brand for developing and testing a version of this
patch.
2022-07-24 12:54:32 +00:00
|
|
|
int format_small_keypad_key(char *buf, Terminal *term, SmallKeypadKey key,
|
|
|
|
bool shift, bool ctrl, bool alt,
|
|
|
|
bool *consumed_alt)
|
2018-12-08 08:25:32 +00:00
|
|
|
{
|
|
|
|
char *p = buf;
|
|
|
|
|
|
|
|
int code;
|
|
|
|
switch (key) {
|
|
|
|
case SKK_HOME: code = 1; break;
|
|
|
|
case SKK_INSERT: code = 2; break;
|
|
|
|
case SKK_DELETE: code = 3; break;
|
|
|
|
case SKK_END: code = 4; break;
|
|
|
|
case SKK_PGUP: code = 5; break;
|
|
|
|
case SKK_PGDN: code = 6; break;
|
2019-01-03 08:12:19 +00:00
|
|
|
default: unreachable("bad small keypad key enum value");
|
2018-12-08 08:25:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Reorder edit keys to physical order */
|
|
|
|
if (term->funky_type == FUNKY_VT400 && code <= 6)
|
|
|
|
code = "\0\2\1\4\5\3\6"[code];
|
|
|
|
|
|
|
|
if (term->vt52_mode && code > 0 && code <= 6) {
|
|
|
|
p += sprintf(p, "\x1B%c", " HLMEIG"[code]);
|
|
|
|
} else if (term->funky_type == FUNKY_SCO) {
|
|
|
|
static const char codes[] = "HL.FIG";
|
|
|
|
if (code == 3) {
|
|
|
|
*p++ = '\x7F';
|
|
|
|
} else {
|
|
|
|
p += sprintf(p, "\x1B[%c", codes[code-1]);
|
|
|
|
}
|
|
|
|
} else if ((code == 1 || code == 4) && term->rxvt_homeend) {
|
|
|
|
p += sprintf(p, code == 1 ? "\x1B[H" : "\x1BOw");
|
|
|
|
} else {
|
2022-07-24 12:54:32 +00:00
|
|
|
if (term->vt52_mode) {
|
|
|
|
p += sprintf(p, "\x1B[%d~", code);
|
|
|
|
} else {
|
|
|
|
int bitmap = 0;
|
|
|
|
if (term->funky_type == FUNKY_XTERM_216)
|
|
|
|
bitmap = shift_bitmap(shift, ctrl, alt, consumed_alt);
|
|
|
|
if (bitmap)
|
|
|
|
p += sprintf(p, "\x1B[%d;%d~", code, bitmap);
|
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1B[%d~", code);
|
|
|
|
}
|
2018-12-08 08:25:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return p - buf;
|
|
|
|
}
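/*
 * Usage sketch (not compiled in). Home is normally sent as ESC [ 1 ~;
 * with term->rxvt_homeend it becomes ESC [ H; and in FUNKY_XTERM_216
 * mode Ctrl+Home becomes ESC [ 1 ; 5 ~. Buffer size is an
 * illustrative assumption.
 */
#if 0
static void small_keypad_example(Terminal *term)
{
    char buf[32];
    bool consumed_alt = false;
    int len = format_small_keypad_key(buf, term, SKK_HOME,
                                      false /* shift */, true /* ctrl */,
                                      false /* alt */, &consumed_alt);
    (void)len;
}
#endif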
|
|
|
|
|
|
|
|
int format_numeric_keypad_key(char *buf, Terminal *term, char key,
|
|
|
|
bool shift, bool ctrl)
|
|
|
|
{
|
|
|
|
char *p = buf;
|
|
|
|
bool app_keypad = (term->app_keypad_keys && !term->no_applic_k);
|
|
|
|
|
|
|
|
if (term->nethack_keypad && (key >= '1' && key <= '9')) {
|
|
|
|
static const char nh_base[] = "bjnh.lyku";
|
|
|
|
char c = nh_base[key - '1'];
|
|
|
|
if (ctrl && c != '.')
|
|
|
|
c &= 0x1F;
|
|
|
|
else if (shift && c != '.')
|
|
|
|
c += 'A'-'a';
|
|
|
|
*p++ = c;
|
|
|
|
} else {
|
|
|
|
int xkey = 0;
|
|
|
|
|
|
|
|
if (term->funky_type == FUNKY_VT400 ||
|
|
|
|
(term->funky_type <= FUNKY_LINUX && app_keypad)) {
|
|
|
|
switch (key) {
|
|
|
|
case 'G': xkey = 'P'; break;
|
|
|
|
case '/': xkey = 'Q'; break;
|
|
|
|
case '*': xkey = 'R'; break;
|
|
|
|
case '-': xkey = 'S'; break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (app_keypad) {
|
|
|
|
switch (key) {
|
|
|
|
case '0': xkey = 'p'; break;
|
|
|
|
case '1': xkey = 'q'; break;
|
|
|
|
case '2': xkey = 'r'; break;
|
|
|
|
case '3': xkey = 's'; break;
|
|
|
|
case '4': xkey = 't'; break;
|
|
|
|
case '5': xkey = 'u'; break;
|
|
|
|
case '6': xkey = 'v'; break;
|
|
|
|
case '7': xkey = 'w'; break;
|
|
|
|
case '8': xkey = 'x'; break;
|
|
|
|
case '9': xkey = 'y'; break;
|
|
|
|
case '.': xkey = 'n'; break;
|
|
|
|
case '\r': xkey = 'M'; break;
|
|
|
|
|
|
|
|
case '+':
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* Keypad + is tricky. It covers a space that would
|
|
|
|
* be taken up on the VT100 by _two_ keys; so we
|
|
|
|
* let Shift select between the two. Worse still,
|
|
|
|
* in xterm function key mode we change which two...
|
|
|
|
*/
|
2018-12-08 08:25:32 +00:00
|
|
|
if (term->funky_type == FUNKY_XTERM)
|
|
|
|
xkey = shift ? 'l' : 'k';
|
|
|
|
else
|
|
|
|
xkey = shift ? 'm' : 'l';
|
|
|
|
break;
|
|
|
|
|
|
|
|
case '/':
|
|
|
|
if (term->funky_type == FUNKY_XTERM)
|
|
|
|
xkey = 'o';
|
|
|
|
break;
|
|
|
|
case '*':
|
|
|
|
if (term->funky_type == FUNKY_XTERM)
|
|
|
|
xkey = 'j';
|
|
|
|
break;
|
|
|
|
case '-':
|
|
|
|
if (term->funky_type == FUNKY_XTERM)
|
|
|
|
xkey = 'm';
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xkey) {
|
|
|
|
if (term->vt52_mode) {
|
|
|
|
if (xkey >= 'P' && xkey <= 'S')
|
|
|
|
p += sprintf(p, "\x1B%c", xkey);
|
|
|
|
else
|
|
|
|
p += sprintf(p, "\x1B?%c", xkey);
|
|
|
|
} else
|
|
|
|
p += sprintf(p, "\x1BO%c", xkey);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-03-06 15:50:26 +00:00
|
|
|
return p - buf;
|
|
|
|
}
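/*
 * Usage sketch (not compiled in). In application keypad mode, keypad
 * '5' produces ESC O u (ESC ? u in VT52 mode); in NetHack keypad mode
 * '8' becomes 'k'; if no special keypad mode applies, nothing is
 * written and 0 is returned, so the caller falls back to sending the
 * literal character. Buffer size is an illustrative assumption.
 */
#if 0
static void numeric_keypad_example(Terminal *term)
{
    char buf[16];
    int len = format_numeric_keypad_key(buf, term, '5',
                                        false /* shift */, false /* ctrl */);
    if (len == 0) {
        /* No keypad translation: send the plain '5' instead. */
        term_keyinput(term, -1, "5", 1);
    }
}
#endif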
|
|
|
|
|
Refactor terminal input to remove ldiscucs.c.
The functions that previously lived in it now live in terminal.c
itself; they've been renamed term_keyinput and term_keyinputw, and
their function is to add data to the terminal's user input buffer from
a char or wchar_t string respectively.
They sit more comfortably in terminal.c anyway, because their whole
point is to translate into the character encoding that the terminal is
currently configured to use. Also, making them part of the terminal
code means they can also take care of calling term_seen_key_event(),
which simplifies most of the call sites in the GTK and Windows front
ends.
Generation of text _inside_ terminal.c, from responses to query escape
sequences, is therefore not done by calling those external entry
points: we send those responses directly to the ldisc, so that they
don't count as keypresses for all the user-facing purposes like bell
overload handling and scrollback reset. To make _that_ convenient,
I've arranged that most of the code that previously lived in
lpage_send and luni_send is now in separate translation functions, so
those can still be called from situations where you're not going to do
the default thing with the translated data.
(However, pasted data _does_ still count as close enough to a keypress
to call term_seen_key_event - but it clears the 'interactive' flag
when the data is passed on to the line discipline, which tweaks a
minor detail of control-char handling in line ending mode but mostly
just means pastes aren't interrupted.)
2019-06-17 19:13:55 +00:00
|
|
|
void term_keyinputw(Terminal *term, const wchar_t *widebuf, int len)
|
|
|
|
{
|
|
|
|
strbuf *buf = term_input_data_from_unicode(term, widebuf, len);
|
|
|
|
if (buf->len)
|
|
|
|
term_keyinput_internal(term, buf->s, buf->len, true);
|
|
|
|
strbuf_free(buf);
|
|
|
|
}
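/*
 * Usage sketch (not compiled in): wide-character input, e.g. a
 * Unicode keypress or IME result, goes through term_keyinputw, which
 * translates it into the terminal's configured character set before
 * buffering it as user input.
 */
#if 0
static void keyinputw_example(Terminal *term)
{
    const wchar_t text[] = { 0x00E9 };  /* U+00E9 LATIN SMALL LETTER E WITH ACUTE */
    term_keyinputw(term, text, 1);
}
#endif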
|
|
|
|
|
|
|
|
void term_keyinput(Terminal *term, int codepage, const void *str, int len)
|
|
|
|
{
|
|
|
|
if (codepage < 0 || codepage == term->ucsdata->line_codepage) {
|
|
|
|
/*
|
|
|
|
* This text needs no translation, either because it's already
|
|
|
|
* in the right character set, or because we got the special
|
|
|
|
* codepage value -1 from our caller which means 'this data
|
|
|
|
* should be charset-agnostic, just send it raw' (for really
|
|
|
|
* simple things like control characters).
|
|
|
|
*/
|
|
|
|
term_keyinput_internal(term, str, len, true);
|
|
|
|
} else {
|
|
|
|
strbuf *buf = term_input_data_from_charset(term, codepage, str, len);
|
|
|
|
if (buf->len)
|
|
|
|
term_keyinput_internal(term, buf->s, buf->len, true);
|
|
|
|
strbuf_free(buf);
|
|
|
|
}
|
|
|
|
}
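/*
 * Usage sketch (not compiled in): charset-agnostic bytes such as
 * control characters use the special codepage value -1, meaning 'send
 * raw'; anything else is translated from the stated codepage into the
 * terminal's configured one first.
 */
#if 0
static void keyinput_example(Terminal *term)
{
    term_keyinput(term, -1, "\003", 1);   /* a raw Ctrl-C byte */
}
#endif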
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
void term_nopaste(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2002-10-22 16:11:33 +00:00
|
|
|
if (term->paste_len == 0)
|
2019-09-08 19:29:00 +00:00
|
|
|
return;
|
2002-10-22 16:11:33 +00:00
|
|
|
sfree(term->paste_buffer);
|
2019-06-17 19:21:06 +00:00
|
|
|
term_bracketed_paste_stop(term);
|
2002-10-22 16:11:33 +00:00
|
|
|
term->paste_buffer = NULL;
|
|
|
|
term->paste_len = 0;
|
2000-07-26 12:13:51 +00:00
|
|
|
}
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
static void deselect(Terminal *term)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2002-10-22 16:11:33 +00:00
|
|
|
term->selstate = NO_SELECTION;
|
|
|
|
term->selstart.x = term->selstart.y = term->selend.x = term->selend.y = 0;
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
|
|
|
|
2017-12-09 12:00:13 +00:00
|
|
|
void term_lost_clipboard_ownership(Terminal *term, int clipboard)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2017-12-10 15:45:45 +00:00
|
|
|
if (!(term->n_mouse_select_clipboards > 1 &&
|
|
|
|
clipboard == term->mouse_select_clipboards[1]))
|
2017-12-09 12:00:13 +00:00
|
|
|
return;
|
|
|
|
|
2002-10-22 16:11:33 +00:00
|
|
|
deselect(term);
|
|
|
|
term_update(term);
|
2013-03-10 11:04:07 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Since terminal output is suppressed during drag-selects, we
|
|
|
|
* should make sure to write any pending output if one has just
|
|
|
|
* finished.
|
|
|
|
*/
|
Proper buffer management between terminal and backend.
The return value of term_data() is used as the return value from the
GUI-terminal versions of the Seat output method, which means backends
will take it to be the amount of standard-output data currently
buffered, and exert back-pressure on the remote peer if it gets too
big (e.g. by ceasing to extend the window in that particular SSH-2
channel).
Historically, as a comment in term_data() explained, we always just
returned 0 from that function, on the basis that we were processing
all the terminal data through our terminal emulation code immediately,
and never retained any of it in the buffer at all. If the terminal
emulation code were to start running slowly, then it would slow down
the _whole_ PuTTY system, due to single-threadedness, and
back-pressure of a sort would be exerted on the remote by it simply
failing to get round to reading from the network socket. But by the
time we got back to the top level of term_data(), we'd have finished
reading all the data we had, so it was still appropriate to return 0.
That comment is still correct if you're thinking about the limiting
factor on terminal data processing being the CPU usage in term_out().
But now that's no longer the whole story, because sometimes we leave
data in term->inbuf without having processed it: during drag-selects
in the terminal window, and (just introduced) while waiting for the
response to a pending window resize request. For both those reasons,
we _don't_ always have a buffer size of zero when we return from
term_data().
So now that hole in our buffer size management is filled in:
term_data() returns the true size of the remaining unprocessed
terminal output, so that back-pressure will be exerted if the terminal
is currently not consuming it. And when processing resumes and we
start to clear our backlog, we call backend_unthrottle to let the
backend know it can relax the back-pressure if necessary.
2021-12-12 10:57:23 +00:00
|
|
|
term_out(term, false);
|
1999-01-08 13:02:13 +00:00
|
|
|
}
|
2000-10-20 13:51:46 +00:00
|
|
|
|
2021-12-12 10:57:23 +00:00
|
|
|
static void term_added_data(Terminal *term, bool called_from_term_data)
|
2001-05-06 14:35:20 +00:00
|
|
|
{
|
2003-03-29 18:30:14 +00:00
|
|
|
if (!term->in_term_out) {
|
2019-09-08 19:29:00 +00:00
|
|
|
term->in_term_out = true;
|
2021-12-12 10:57:23 +00:00
|
|
|
term_out(term, called_from_term_data);
|
2019-09-08 19:29:00 +00:00
|
|
|
term->in_term_out = false;
|
2003-03-29 18:30:14 +00:00
|
|
|
}
|
2018-09-19 17:22:36 +00:00
|
|
|
}
|
|
|
|
|
2021-09-16 13:50:59 +00:00
|
|
|
size_t term_data(Terminal *term, const void *data, size_t len)
|
2018-09-19 17:22:36 +00:00
|
|
|
{
|
|
|
|
bufchain_add(&term->inbuf, data, len);
|
2021-12-12 10:57:23 +00:00
|
|
|
term_added_data(term, true);
|
|
|
|
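/* Report how much terminal output is still unprocessed, so the
 * backend can exert back-pressure on the remote if the terminal is
 * not currently consuming it. */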
return bufchain_size(&term->inbuf);
|
2000-10-20 13:51:46 +00:00
|
|
|
}
|
2002-10-26 12:58:13 +00:00
|
|
|
|
2018-09-11 14:17:16 +00:00
|
|
|
void term_provide_logctx(Terminal *term, LogContext *logctx)
|
2002-10-26 12:58:13 +00:00
|
|
|
{
|
|
|
|
term->logctx = logctx;
|
|
|
|
}
|
2004-11-27 19:34:45 +00:00
|
|
|
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that they are bool, assigned from the 'true'
defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
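A tiny illustrative fragment (not taken from this tree) of the pattern that paragraph describes, together with the separated form it was rewritten into:

#include <stdbool.h>

void example_separated_assignments(void)
{
    bool a, b, c;

    /* a = b = c = true;    <- the chained form that now attracts the
     *                         gcc warning quoted above */

    a = true;               /* separated assignments keep the compiler quiet */
    b = true;
    c = true;

    (void)a; (void)b; (void)c;
}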
2018-11-02 19:23:19 +00:00
|
|
|
void term_set_focus(Terminal *term, bool has_focus)
|
2004-11-27 19:34:45 +00:00
|
|
|
{
|
|
|
|
term->has_focus = has_focus;
|
|
|
|
term_schedule_cblink(term);
|
|
|
|
}
|
2005-04-21 13:57:08 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Provide "auto" settings for remote tty modes, suitable for an
|
|
|
|
* application with a terminal window.
|
|
|
|
*/
|
|
|
|
char *term_get_ttymode(Terminal *term, const char *mode)
|
|
|
|
{
|
2015-05-15 10:15:42 +00:00
|
|
|
const char *val = NULL;
|
2005-04-21 13:57:08 +00:00
|
|
|
if (strcmp(mode, "ERASE") == 0) {
|
2019-09-08 19:29:00 +00:00
|
|
|
val = term->bksp_is_delete ? "^?" : "^H";
|
2016-05-03 07:43:50 +00:00
|
|
|
} else if (strcmp(mode, "IUTF8") == 0) {
|
2021-02-07 19:59:20 +00:00
|
|
|
val = (term->ucsdata->line_codepage == CP_UTF8) ? "yes" : "no";
|
2005-04-21 13:57:08 +00:00
|
|
|
}
|
Post-release destabilisation! Completely remove the struct type
'Config' in putty.h, which stores all PuTTY's settings and includes an
arbitrary length limit on every single one of those settings which is
stored in string form. In place of it is 'Conf', an opaque data type
everywhere outside the new file conf.c, which stores a list of (key,
value) pairs in which every key contains an integer identifying a
configuration setting, and for some of those integers the key also
contains extra parts (so that, for instance, CONF_environmt is a
string-to-string mapping). Everywhere that a Config was previously
used, a Conf is now; everywhere there was a Config structure copy,
conf_copy() is called; every lookup, adjustment, load and save
operation on a Config has been rewritten; and there's a mechanism for
serialising a Conf into a binary blob and back for use with Duplicate
Session.
User-visible effects of this change _should_ be minimal, though I
don't doubt I've introduced one or two bugs here and there which will
eventually be found. The _intended_ visible effects of this change are
that all arbitrary limits on configuration strings and lists (e.g.
limit on number of port forwardings) should now disappear; that list
boxes in the configuration will now be displayed in a sorted order
rather than the arbitrary order in which they were added to the list
(since the underlying data structure is now a sorted tree234 rather
than an ad-hoc comma-separated string); and one more specific change,
which is that local and dynamic port forwardings on the same port
number are now mutually exclusive in the configuration (putting 'D' in
the key rather than the value was a mistake in the first place).
One other reorganisation as a result of this is that I've moved all
the dialog.c standard handlers (dlg_stdeditbox_handler and friends)
out into config.c, because I can't really justify calling them generic
any more. When they took a pointer to an arbitrary structure type and
the offset of a field within that structure, they were independent of
whether that structure was a Config or something completely different,
but now they really do expect to talk to a Conf, which can _only_ be
used for PuTTY configuration, so I've renamed them all things like
conf_editbox_handler and moved them out of the nominally independent
dialog-box management module into the PuTTY-specific config.c.
[originally from svn r9214]
2011-07-14 18:52:21 +00:00
|
|
|
/* FIXME: perhaps we should set ONLCR based on lfhascr as well? */
|
2007-09-03 20:33:40 +00:00
|
|
|
/* FIXME: or ECHO and friends based on local echo state? */
|
2005-04-21 13:57:08 +00:00
|
|
|
return dupstr(val);
|
|
|
|
}
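Picking up the Config-to-Conf rework described a little way above, here is a minimal usage sketch. It assumes the accessor names exposed by conf.c (conf_new, conf_set_str, conf_set_int, conf_set_str_str, conf_copy, conf_free) and the CONF_host / CONF_port / CONF_environmt keys; treat the exact signatures as illustrative rather than authoritative.

#include "putty.h"

void example_conf_usage(void)
{
    Conf *conf = conf_new();

    /* Primary keys hold a single value of a fixed type... */
    conf_set_str(conf, CONF_host, "example.org");
    conf_set_int(conf, CONF_port, 22);

    /* ...while keys like CONF_environmt also take a string subkey,
     * giving a string-to-string mapping with no arbitrary size limit. */
    conf_set_str_str(conf, CONF_environmt, "LANG", "C.UTF-8");

    /* Structure copy is now an explicit operation, e.g. for
     * Duplicate Session. */
    Conf *dup = conf_copy(conf);

    conf_free(dup);
    conf_free(conf);
}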
|
2005-10-30 20:24:09 +00:00
|
|
|
|
|
|
|
struct term_userpass_state {
|
New centralised version of local line editing.
This takes over from both the implementation in ldisc.c and the one in
term_get_userpass_input, which were imperfectly duplicating each
other's functionality. The new version should be more consistent
between the two already, and also, it means further improvements can
now be made in just one place.
In the course of this, I've restructured the inside of ldisc.c by
moving the input_queue bufchain to the other side of the translation
code in ldisc_send. Previously, ldisc_send received a string, an
optional 'dedicated key' indication (bodgily signalled by a negative
length) and an 'interactive' flag, translated that somehow into a
combination of raw backend output and specials, and saved the latter
in input_queue. Now it saves the original (string, dedicated flag,
interactive flag) data in input_queue, and doesn't do the translation
until the data is pulled back _out_ of the queue. That's because the
new line editing system expects to receive something much closer to
the original data format.
The term_get_userpass_input system is also substantially restructured.
Instead of ldisc.c handing each individual keystroke to terminal.c so
that it can do line editing on it, terminal.c now just gives the Ldisc
a pointer to its instance of the new TermLineEditor object - and then
ldisc.c can put keystrokes straight into that, in the same way it
would put them into its own TermLineEditor, without having to go via
terminal.c at all. So the term_get_userpass_input edifice is only
called back when the line editor actually delivers the answer to a
username or password prompt.
(I considered not _even_ having a separate TermLineEditor for password
prompts, and just letting ldisc.c use its own. But the problem is that
some of the behaviour differences between the two line editors are
deliberate, for example the use of ^D to signal 'abort this prompt',
and the use of Escape as an alternative line-clearing command. So
TermLineEditor has a flags word that allows ldisc and terminal to set
it up differently. Also this lets me give the two TermLineEditors a
different vtable of callback functions, which is a convenient way for
terminal.c to get notified when a prompt has been answered.)
The new line editor still passes all the tests I wrote for the old
one. But it already has a couple of important improvements, both in
the area of UTF-8 handling:
Firstly, when we display a UTF-8 character on the terminal, we check
with the terminal how many character cells it occupied, and then if
the user deletes it again from the editing buffer, we can emit the
right number of backspace-space-backspace sequences. (The old ldisc
line editor incorrectly assumed all Unicode characters had terminal
width 1, partly because its buffer was byte- rather than character-
oriented and so it was more than enough work just finding where the
character _start_ was.)
Secondly, terminal.c's userpass line editor would never emit a byte in
the 80-BF range to the terminal at all, which meant that nontrivial
UTF-8 characters always came out as U+FFFD blobs!
2023-03-04 12:56:01 +00:00
|
|
|
prompts_t *prompts;
|
2005-10-30 20:24:09 +00:00
|
|
|
size_t curr_prompt;
|
2023-03-04 12:56:01 +00:00
|
|
|
enum TermUserpassPromptState {
|
|
|
|
TUS_INITIAL, /* haven't even printed the prompt yet */
|
|
|
|
TUS_ACTIVE, /* prompt is currently receiving user input */
|
|
|
|
TUS_ABORTED, /* user pressed ^C or ^D to cancel prompt */
|
|
|
|
} prompt_state;
|
|
|
|
Terminal *term;
|
|
|
|
TermLineEditor *le;
|
|
|
|
TermLineEditorCallbackReceiver le_rcv;
|
2005-10-30 20:24:09 +00:00
|
|
|
};
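The 'erase by cell width' behaviour described in the line-editing commit message above can be sketched like this (a hypothetical helper, not part of TermLineEditor itself): given the number of cells the terminal said a character occupied, emit that many backspace-space-backspace triples.

#include <stddef.h>

/* 'emit' stands in for whatever routine writes raw bytes to the
 * terminal display. */
static void example_erase_cells(void (*emit)(const void *data, size_t len),
                                int width)
{
    while (width-- > 0)
        emit("\b \b", 3);
}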
|
|
|
|
|
2023-03-04 12:56:01 +00:00
|
|
|
static void term_userpass_next_prompt(struct term_userpass_state *s);
|
2019-03-09 15:51:38 +00:00
|
|
|
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
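A sketch of the asynchronous calling convention that commit describes. prompts_t, its callback fields and term_get_userpass_input() appear in this file; new_prompts(), add_prompt() and dupstr() are the usual PuTTY helpers; the example_* names are invented for illustration.

#include "putty.h"

static void example_prompts_done(void *ctx)
{
    prompts_t *p = (prompts_t *)ctx;
    /* By the time this runs, term_get_userpass_input() has filled in
     * p->spr and, on success, the prompt results, so the protocol code
     * can continue from where it left off. */
    (void)p;
}

void example_ask_for_username(Terminal *term)
{
    prompts_t *p = new_prompts();
    add_prompt(p, dupstr("login as: "), true);   /* true = echo the input */

    /* Any prompts_t that might be answered asynchronously must provide
     * a completion callback. */
    p->callback = example_prompts_done;
    p->callback_ctx = p;

    /* May complete immediately, or later via the callback. */
    term_get_userpass_input(term, p);
}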
2021-09-14 10:57:21 +00:00
|
|
|
/*
|
|
|
|
* Signal that a prompts_t is done. This involves sending a
|
|
|
|
* notification to the caller, and also turning off our own callback
|
|
|
|
* that listens for more data arriving in the ldisc's input queue.
|
|
|
|
*/
|
Richer data type for interactive prompt results.
All the seat functions that request an interactive prompt of some kind
to the user - both the main seat_get_userpass_input and the various
confirmation dialogs for things like host keys - were using a simple
int return value, with the general semantics of 0 = "fail", 1 =
"proceed" (and in the case of seat_get_userpass_input, answers to the
prompts were provided), and -1 = "request in progress, wait for a
callback".
In this commit I change all those functions' return types to a new
struct called SeatPromptResult, whose primary field is an enum
replacing those simple integer values.
The main purpose is that the enum has not three but _four_ values: the
"fail" result has been split into 'user abort' and 'software abort'.
The distinction is that a user abort occurs as a result of an
interactive UI action, such as the user clicking 'cancel' in a dialog
box or hitting ^D or ^C at a terminal password prompt - and therefore,
there's no need to display an error message telling the user that the
interactive operation has failed, because the user already knows,
because they _did_ it. 'Software abort' is from any other cause, where
PuTTY is the first to know there was a problem, and has to tell the
user.
We already had this 'user abort' vs 'software abort' distinction in
other parts of the code - the SSH backend has separate termination
functions which protocol layers can call. But we assumed that any
failure from an interactive prompt request fell into the 'user abort'
category, which is not true. A couple of examples: if you configure a
host key fingerprint in your saved session via the SSH > Host keys
pane, and the server presents a host key that doesn't match it, then
verify_ssh_host_key would report that the user had aborted the
connection, and feel no need to tell the user what had gone wrong!
Similarly, if a password provided on the command line was not
accepted, then (after I fixed the semantics of that in the previous
commit) the same wrong handling would occur.
So now, those Seat prompt functions too can communicate whether the
user or the software originated a connection abort. And in the latter
case, we also provide an error message to present to the user. Result:
in those two example cases (and others), error messages should no
longer go missing.
Implementation note: to avoid the hassle of having the error message
in a SeatPromptResult being a dynamically allocated string (and hence,
every recipient of one must always check whether it's non-NULL and
free it on every exit path, plus being careful about copying the
struct around), I've instead arranged that the structure contains a
function pointer and a couple of parameters, so that the string form
of the message can be constructed on demand. That way, the only users
who need to free it are the ones who actually _asked_ for it in the
first place, which is a much smaller set.
(This is one of the rare occasions that I regret not having C++'s
extra features available in this code base - a unique_ptr or
shared_ptr to a string would have been just the thing here, and the
compiler would have done all the hard work for me of remembering where
to insert the frees!)
2021-12-28 17:52:00 +00:00
|
|
|
static inline SeatPromptResult signal_prompts_t(Terminal *term, prompts_t *p,
|
|
|
|
SeatPromptResult spr)
|
2021-09-14 10:57:21 +00:00
|
|
|
{
|
|
|
|
assert(p->callback && "Asynchronous userpass input requires a callback");
|
|
|
|
queue_toplevel_callback(p->callback, p->callback_ctx);
|
2022-01-29 18:22:31 +00:00
|
|
|
if (term->ldisc)
|
2023-03-04 12:56:01 +00:00
|
|
|
ldisc_provide_userpass_le(term->ldisc, NULL);
|
2021-12-28 17:52:00 +00:00
|
|
|
p->spr = spr;
|
2023-03-04 12:56:01 +00:00
|
|
|
if (p->data) {
|
|
|
|
term_userpass_state_free(p->data);
|
|
|
|
p->data = NULL;
|
|
|
|
}
|
2021-12-28 17:52:00 +00:00
|
|
|
return spr;
|
2021-09-14 10:57:21 +00:00
|
|
|
}
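On the 'construct the error message on demand' idea from the SeatPromptResult commit message above, here is a stripped-down illustration; these are not PuTTY's actual field or function names, just the shape of the pattern.

#include <stdio.h>
#include <stdlib.h>

struct example_prompt_result {
    int kind;                   /* ok / user abort / software abort */
    /* Instead of carrying an allocated message string around, carry a
     * formatter plus its parameters; only callers who actually want the
     * text pay for building (and freeing) it. */
    char *(*errfn)(const struct example_prompt_result *r);
    const char *err_fmt;
    int err_code;
};

static char *example_errfn(const struct example_prompt_result *r)
{
    int n = snprintf(NULL, 0, r->err_fmt, r->err_code);
    char *msg = malloc(n + 1);
    if (msg)
        snprintf(msg, n + 1, r->err_fmt, r->err_code);
    return msg;                 /* caller frees */
}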
|
|
|
|
|
2023-03-04 12:56:01 +00:00
|
|
|
/* Tiny wrapper to make it easier to write lots of little strings */
|
|
|
|
static inline void term_write(Terminal *term, ptrlen data)
|
|
|
|
{
|
|
|
|
term_data(term, data.ptr, data.len);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_lineedit_to_terminal(
|
|
|
|
TermLineEditorCallbackReceiver *rcv, ptrlen data)
|
|
|
|
{
|
|
|
|
struct term_userpass_state *s = container_of(
|
|
|
|
rcv, struct term_userpass_state, le_rcv);
|
|
|
|
prompt_t *pr = s->prompts->prompts[s->curr_prompt];
|
|
|
|
if (pr->echo)
|
|
|
|
term_write(s->term, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_lineedit_to_backend(
|
|
|
|
TermLineEditorCallbackReceiver *rcv, ptrlen data)
|
|
|
|
{
|
|
|
|
struct term_userpass_state *s = container_of(
|
|
|
|
rcv, struct term_userpass_state, le_rcv);
|
|
|
|
prompt_t *pr = s->prompts->prompts[s->curr_prompt];
|
|
|
|
put_datapl(pr->result, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_lineedit_newline(TermLineEditorCallbackReceiver *rcv)
|
|
|
|
{
|
|
|
|
struct term_userpass_state *s = container_of(
|
|
|
|
rcv, struct term_userpass_state, le_rcv);
|
|
|
|
|
|
|
|
prompt_t *pr = s->prompts->prompts[s->curr_prompt];
|
|
|
|
if (!pr->echo) {
|
|
|
|
/* If echo is disabled, we won't have printed the newline in
|
|
|
|
* term_lineedit_to_terminal, so print it now */
|
|
|
|
term_write(s->term, PTRLEN_LITERAL("\x0D\x0A"));
|
|
|
|
}
|
|
|
|
|
|
|
|
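/* This prompt is answered: detach our line editor from the ldisc
 * and move on to the next prompt (or finish the whole set). */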
ldisc_provide_userpass_le(s->term->ldisc, NULL);
|
|
|
|
s->curr_prompt++;
|
|
|
|
s->prompt_state = TUS_INITIAL;
|
|
|
|
term_userpass_next_prompt(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_lineedit_special(
|
|
|
|
TermLineEditorCallbackReceiver *rcv, SessionSpecialCode code, int arg)
|
|
|
|
{
|
|
|
|
struct term_userpass_state *s = container_of(
|
|
|
|
rcv, struct term_userpass_state, le_rcv);
|
|
|
|
switch (code) {
|
|
|
|
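/* ^C (interrupt) or ^D (EOF) aborts the whole prompt set. */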
case SS_IP:
|
|
|
|
case SS_EOF:
|
|
|
|
ldisc_provide_userpass_le(s->term->ldisc, NULL);
|
|
|
|
s->prompt_state = TUS_ABORTED;
|
|
|
|
signal_prompts_t(s->term, s->prompts, SPR_USER_ABORT);
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const TermLineEditorCallbackReceiverVtable
|
|
|
|
term_userpass_lineedit_receiver_vt = {
|
|
|
|
.to_terminal = term_lineedit_to_terminal,
|
|
|
|
.to_backend = term_lineedit_to_backend,
|
|
|
|
.special = term_lineedit_special,
|
|
|
|
.newline = term_lineedit_newline,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct term_userpass_state *term_userpass_state_new(
|
|
|
|
Terminal *term, prompts_t *prompts)
|
|
|
|
{
|
|
|
|
struct term_userpass_state *s = snew(struct term_userpass_state);
|
|
|
|
s->prompts = prompts;
|
|
|
|
s->curr_prompt = 0;
|
|
|
|
s->prompt_state = TUS_INITIAL;
|
|
|
|
s->term = term;
|
|
|
|
s->le_rcv.vt = &term_userpass_lineedit_receiver_vt;
|
|
|
|
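/* Flags specific to userpass prompts (cf. the line-editing commit
 * message above): ^C/^D abort the prompt, and Escape clears the
 * current line. */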
s->le = lineedit_new(term, LE_INTERRUPT | LE_EOF_ALWAYS | LE_ESC_ERASES,
|
|
|
|
&s->le_rcv);
|
|
|
|
assert(!term->userpass_state);
|
|
|
|
term->userpass_state = s;
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_userpass_state_free(struct term_userpass_state *s)
|
|
|
|
{
|
|
|
|
assert(s->term->userpass_state == s);
|
|
|
|
s->term->userpass_state = NULL;
|
|
|
|
lineedit_free(s->le);
|
|
|
|
sfree(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void term_userpass_next_prompt(struct term_userpass_state *s)
|
|
|
|
{
|
|
|
|
if (s->prompt_state != TUS_INITIAL)
|
|
|
|
return;
|
|
|
|
if (s->curr_prompt < s->prompts->n_prompts) {
|
|
|
|
prompt_t *pr = s->prompts->prompts[s->curr_prompt];
|
|
|
|
term_write(s->term, ptrlen_from_asciz(pr->prompt));
|
|
|
|
s->prompt_state = TUS_ACTIVE;
|
|
|
|
ldisc_provide_userpass_le(s->term->ldisc, s->le);
|
|
|
|
} else {
|
|
|
|
/* This triggers the callback provided by the userpass client,
|
|
|
|
* which will call term_get_userpass_input to fetch the result
|
|
|
|
* we're storing here */
|
|
|
|
signal_prompts_t(s->term, s->prompts, SPR_OK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-04 13:37:13 +00:00
|
|
|
static bool terminal_use_utf8 = true;
|
|
|
|
bool set_legacy_charset_handling(bool newvalue)
|
|
|
|
{
|
|
|
|
terminal_use_utf8 = !newvalue;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2005-10-30 20:24:09 +00:00
|
|
|
/*
|
|
|
|
* Process some terminal data in the course of username/password
|
|
|
|
* input.
|
|
|
|
*/
|
2021-12-28 17:52:00 +00:00
|
|
|
SeatPromptResult term_get_userpass_input(Terminal *term, prompts_t *p)
|
2005-10-30 20:24:09 +00:00
|
|
|
{
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
2021-09-14 10:57:21 +00:00
|
|
|
if (!term->ldisc) {
|
|
|
|
/* Can't handle interactive prompts without an ldisc */
|
Richer data type for interactive prompt results.
All the seat functions that request an interactive prompt of some kind
to the user - both the main seat_get_userpass_input and the various
confirmation dialogs for things like host keys - were using a simple
int return value, with the general semantics of 0 = "fail", 1 =
"proceed" (and in the case of seat_get_userpass_input, answers to the
prompts were provided), and -1 = "request in progress, wait for a
callback".
In this commit I change all those functions' return types to a new
struct called SeatPromptResult, whose primary field is an enum
replacing those simple integer values.
The main purpose is that the enum has not three but _four_ values: the
"fail" result has been split into 'user abort' and 'software abort'.
The distinction is that a user abort occurs as a result of an
interactive UI action, such as the user clicking 'cancel' in a dialog
box or hitting ^D or ^C at a terminal password prompt - and therefore,
there's no need to display an error message telling the user that the
interactive operation has failed, because the user already knows,
because they _did_ it. 'Software abort' is from any other cause, where
PuTTY is the first to know there was a problem, and has to tell the
user.
We already had this 'user abort' vs 'software abort' distinction in
other parts of the code - the SSH backend has separate termination
functions which protocol layers can call. But we assumed that any
failure from an interactive prompt request fell into the 'user abort'
category, which is not true. A couple of examples: if you configure a
host key fingerprint in your saved session via the SSH > Host keys
pane, and the server presents a host key that doesn't match it, then
verify_ssh_host_key would report that the user had aborted the
connection, and feel no need to tell the user what had gone wrong!
Similarly, if a password provided on the command line was not
accepted, then (after I fixed the semantics of that in the previous
commit) the same wrong handling would occur.
So now, those Seat prompt functions too can communicate whether the
user or the software originated a connection abort. And in the latter
case, we also provide an error message to present to the user. Result:
in those two example cases (and others), error messages should no
longer go missing.
Implementation note: to avoid the hassle of having the error message
in a SeatPromptResult being a dynamically allocated string (and hence,
every recipient of one must always check whether it's non-NULL and
free it on every exit path, plus being careful about copying the
struct around), I've instead arranged that the structure contains a
function pointer and a couple of parameters, so that the string form
of the message can be constructed on demand. That way, the only users
who need to free it are the ones who actually _asked_ for it in the
first place, which is a much smaller set.
(This is one of the rare occasions that I regret not having C++'s
extra features available in this code base - a unique_ptr or
shared_ptr to a string would have been just the thing here, and the
compiler would have done all the hard work for me of remembering where
to insert the frees!)
2021-12-28 17:52:00 +00:00
|
|
|
return signal_prompts_t(term, p, SPR_SW_ABORT(
|
|
|
|
"Terminal not prepared for interactive prompts"));
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
2021-09-14 10:57:21 +00:00
|
|
|
}
|
|
|
|
|
Richer data type for interactive prompt results.
All the seat functions that request an interactive prompt of some kind
to the user - both the main seat_get_userpass_input and the various
confirmation dialogs for things like host keys - were using a simple
int return value, with the general semantics of 0 = "fail", 1 =
"proceed" (and in the case of seat_get_userpass_input, answers to the
prompts were provided), and -1 = "request in progress, wait for a
callback".
In this commit I change all those functions' return types to a new
struct called SeatPromptResult, whose primary field is an enum
replacing those simple integer values.
The main purpose is that the enum has not three but _four_ values: the
"fail" result has been split into 'user abort' and 'software abort'.
The distinction is that a user abort occurs as a result of an
interactive UI action, such as the user clicking 'cancel' in a dialog
box or hitting ^D or ^C at a terminal password prompt - and therefore,
there's no need to display an error message telling the user that the
interactive operation has failed, because the user already knows,
because they _did_ it. 'Software abort' is from any other cause, where
PuTTY is the first to know there was a problem, and has to tell the
user.
We already had this 'user abort' vs 'software abort' distinction in
other parts of the code - the SSH backend has separate termination
functions which protocol layers can call. But we assumed that any
failure from an interactive prompt request fell into the 'user abort'
category, which is not true. A couple of examples: if you configure a
host key fingerprint in your saved session via the SSH > Host keys
pane, and the server presents a host key that doesn't match it, then
verify_ssh_host_key would report that the user had aborted the
connection, and feel no need to tell the user what had gone wrong!
Similarly, if a password provided on the command line was not
accepted, then (after I fixed the semantics of that in the previous
commit) the same wrong handling would occur.
So now, those Seat prompt functions too can communicate whether the
user or the software originated a connection abort. And in the latter
case, we also provide an error message to present to the user. Result:
in those two example cases (and others), error messages should no
longer go missing.
Implementation note: to avoid the hassle of having the error message
in a SeatPromptResult being a dynamically allocated string (and hence,
every recipient of one must always check whether it's non-NULL and
free it on every exit path, plus being careful about copying the
struct around), I've instead arranged that the structure contains a
function pointer and a couple of parameters, so that the string form
of the message can be constructed on demand. That way, the only users
who need to free it are the ones who actually _asked_ for it in the
first place, which is a much smaller set.
(This is one of the rare occasions that I regret not having C++'s
extra features available in this code base - a unique_ptr or
shared_ptr to a string would have been just the thing here, and the
compiler would have done all the hard work for me of remembering where
to insert the frees!)
2021-12-28 17:52:00 +00:00
|
|
|
if (p->spr.kind != SPRK_INCOMPLETE) {
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
2021-09-14 10:57:21 +00:00
|
|
|
/* We've already finished these prompts, so return the same
|
|
|
|
* result again */
|
Richer data type for interactive prompt results.
All the seat functions that request an interactive prompt of some kind
to the user - both the main seat_get_userpass_input and the various
confirmation dialogs for things like host keys - were using a simple
int return value, with the general semantics of 0 = "fail", 1 =
"proceed" (and in the case of seat_get_userpass_input, answers to the
prompts were provided), and -1 = "request in progress, wait for a
callback".
In this commit I change all those functions' return types to a new
struct called SeatPromptResult, whose primary field is an enum
replacing those simple integer values.
The main purpose is that the enum has not three but _four_ values: the
"fail" result has been split into 'user abort' and 'software abort'.
The distinction is that a user abort occurs as a result of an
interactive UI action, such as the user clicking 'cancel' in a dialog
box or hitting ^D or ^C at a terminal password prompt - and therefore,
there's no need to display an error message telling the user that the
interactive operation has failed, because the user already knows,
because they _did_ it. 'Software abort' is from any other cause, where
PuTTY is the first to know there was a problem, and has to tell the
user.
We already had this 'user abort' vs 'software abort' distinction in
other parts of the code - the SSH backend has separate termination
functions which protocol layers can call. But we assumed that any
failure from an interactive prompt request fell into the 'user abort'
category, which is not true. A couple of examples: if you configure a
host key fingerprint in your saved session via the SSH > Host keys
pane, and the server presents a host key that doesn't match it, then
verify_ssh_host_key would report that the user had aborted the
connection, and feel no need to tell the user what had gone wrong!
Similarly, if a password provided on the command line was not
accepted, then (after I fixed the semantics of that in the previous
commit) the same wrong handling would occur.
So now, those Seat prompt functions too can communicate whether the
user or the software originated a connection abort. And in the latter
case, we also provide an error message to present to the user. Result:
in those two example cases (and others), error messages should no
longer go missing.
Implementation note: to avoid the hassle of having the error message
in a SeatPromptResult being a dynamically allocated string (and hence,
every recipient of one must always check whether it's non-NULL and
free it on every exit path, plus being careful about copying the
struct around), I've instead arranged that the structure contains a
function pointer and a couple of parameters, so that the string form
of the message can be constructed on demand. That way, the only users
who need to free it are the ones who actually _asked_ for it in the
first place, which is a much smaller set.
(This is one of the rare occasions that I regret not having C++'s
extra features available in this code base - a unique_ptr or
shared_ptr to a string would have been just the thing here, and the
compiler would have done all the hard work for me of remembering where
to insert the frees!)
2021-12-28 17:52:00 +00:00
|
|
|
return p->spr;
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
2021-09-14 10:57:21 +00:00
|
|
|
}
|
|
|
|
|
2005-10-30 20:24:09 +00:00
|
|
|
struct term_userpass_state *s = (struct term_userpass_state *)p->data;
|
Complete rework of terminal userpass input system.
The system for handling seat_get_userpass_input has always been
structured differently between GUI PuTTY and CLI tools like Plink.
In the CLI tools, password input is read directly from the OS
terminal/console device by console_get_userpass_input; this means that
you need to ensure the same terminal input data _hasn't_ already been
consumed by the main event loop and sent on to the backend. This is
achieved by the backend_sendok() method, which tells the event loop
when the backend has finished issuing password prompts, and hence,
when it's safe to start passing standard input to backend_send().
But in the GUI tools, input generated by the terminal window has
always been sent straight to backend_send(), regardless of whether
backend_sendok() says it wants it. So the terminal-based
implementation of username and password prompts has to work by
consuming input data that had _already_ been passed to the backend -
hence, any backend that needs to do that must keep its input on a
bufchain, and pass that bufchain to seat_get_userpass_input.
It's awkward that these two totally different systems coexist in the
first place. And now that SSH proxying needs to present interactive
prompts of its own, it's clear which one should win: the CLI style is
the Right Thing. So this change reworks the GUI side of the mechanism
to be more similar: terminal data now goes into a queue in the Ldisc,
and is not sent on to the backend until the backend says it's ready
for it via backend_sendok(). So terminal-based userpass prompts can
now consume data directly from that queue during the connection setup
stage.
As a result, the 'bufchain *' parameter has vanished from all the
userpass_input functions (both the official implementations of the
Seat trait method, and term_get_userpass_input() to which some of
those implementations delegate). The only function that actually used
that bufchain, namely term_get_userpass_input(), now instead reads
from the ldisc's input queue via a couple of new Ldisc functions.
(Not _trivial_ functions, since input buffered by Ldisc can be a
mixture of raw bytes and session specials like SS_EOL! The input queue
inside Ldisc is a bufchain containing a fiddly binary encoding that
can represent an arbitrary interleaving of those things.)
This greatly simplifies the calls to seat_get_userpass_input in
backends, which now don't have to mess about with passing their own
user_input bufchain around, or toggling their want_user_input flag
back and forth to request data to put on to that bufchain.
But the flip side is that now there has to be some _other_ method for
notifying the terminal when there's more input to be consumed during
an interactive prompt, and for notifying the backend when prompt input
has finished so that it can proceed to the next stage of the protocol.
This is done by a pair of extra callbacks: when more data is put on to
Ldisc's input queue, it triggers a call to term_get_userpass_input,
and when term_get_userpass_input finishes, it calls a callback
function provided in the prompts_t.
Therefore, any use of a prompts_t which *might* be asynchronous must
fill in the latter callback when setting up the prompts_t. In SSH, the
callback is centralised into a common PPL helper function, which
reinvokes the same PPL's process_queue coroutine; in rlogin we have to
set it up ourselves.
I'm sorry for this large and sprawling patch: I tried fairly hard to
break it up into individually comprehensible sub-patches, but I just
couldn't tease out any part of it that would stand sensibly alone.
2021-09-14 10:57:21 +00:00
|
|
|
|
2005-10-30 20:24:09 +00:00
|
|
|
if (!s) {
|
2019-09-08 19:29:00 +00:00
|
|
|
/*
|
|
|
|
* First call. Set some stuff up.
|
|
|
|
*/
|
New centralised version of local line editing.
This takes over from both the implementation in ldisc.c and the one in
term_get_userpass_input, which were imperfectly duplicating each
other's functionality. The new version should be more consistent
between the two already, and also, it means further improvements can
now be made in just one place.
In the course of this, I've restructured the inside of ldisc.c by
moving the input_queue bufchain to the other side of the translation
code in ldisc_send. Previously, ldisc_send received a string, an
optional 'dedicated key' indication (bodgily signalled by a negative
length) and an 'interactive' flag, translated that somehow into a
combination of raw backend output and specials, and saved the latter
in input_queue. Now it saves the original (string, dedicated flag,
interactive flag) data in input_queue, and doesn't do the translation
until the data is pulled back _out_ of the queue. That's because the
new line editing system expects to receive something much closer to
the original data format.
The term_get_userpass_input system is also substantially restructured.
Instead of ldisc.c handing each individual keystroke to terminal.c so
that it can do line editing on it, terminal.c now just gives the Ldisc
a pointer to its instance of the new TermLineEditor object - and then
ldisc.c can put keystrokes straight into that, in the same way it
would put them into its own TermLineEditor, without having to go via
terminal.c at all. So the term_get_userpass_input edifice is only
called back when the line editor actually delivers the answer to a
username or password prompt.
(I considered not _even_ having a separate TermLineEditor for password
prompts, and just letting ldisc.c use its own. But the problem is that
some of the behaviour differences between the two line editors are
deliberate, for example the use of ^D to signal 'abort this prompt',
and the use of Escape as an alternative line-clearing command. So
TermLineEditor has a flags word that allows ldisc and terminal to set
it up differently. Also this lets me give the two TermLineEditors a
different vtable of callback functions, which is a convenient way for
terminal.c to get notified when a prompt has been answered.)
The new line editor still passes all the tests I wrote for the old
one. But it already has a couple of important improvements, both in
the area of UTF-8 handling:
Firstly, when we display a UTF-8 character on the terminal, we check
with the terminal how many character cells it occupied, and then if
the user deletes it again from the editing buffer, we can emit the
right number of backspace-space-backspace sequences. (The old ldisc
line editor incorrectly assumed all Unicode characters had terminal
with 1, partly because its buffer was byte- rather than character-
oriented and so it was more than enough work just finding where the
character _start_ was.)
Secondly, terminal.c's userpass line editor would never emit a byte in
the 80-BF range to the terminal at all, which meant that nontrivial
UTF-8 characters always came out as U+FFFD blobs!
2023-03-04 12:56:01 +00:00
|
|
|
p->data = s = term_userpass_state_new(term, p);
|
Richer data type for interactive prompt results.
All the seat functions that request an interactive prompt of some kind
to the user - both the main seat_get_userpass_input and the various
confirmation dialogs for things like host keys - were using a simple
int return value, with the general semantics of 0 = "fail", 1 =
"proceed" (and in the case of seat_get_userpass_input, answers to the
prompts were provided), and -1 = "request in progress, wait for a
callback".
In this commit I change all those functions' return types to a new
struct called SeatPromptResult, whose primary field is an enum
replacing those simple integer values.
The main purpose is that the enum has not three but _four_ values: the
"fail" result has been split into 'user abort' and 'software abort'.
The distinction is that a user abort occurs as a result of an
interactive UI action, such as the user clicking 'cancel' in a dialog
box or hitting ^D or ^C at a terminal password prompt - and therefore,
there's no need to display an error message telling the user that the
interactive operation has failed, because the user already knows,
because they _did_ it. 'Software abort' is from any other cause, where
PuTTY is the first to know there was a problem, and has to tell the
user.
We already had this 'user abort' vs 'software abort' distinction in
other parts of the code - the SSH backend has separate termination
functions which protocol layers can call. But we assumed that any
failure from an interactive prompt request fell into the 'user abort'
category, which is not true. A couple of examples: if you configure a
host key fingerprint in your saved session via the SSH > Host keys
pane, and the server presents a host key that doesn't match it, then
verify_ssh_host_key would report that the user had aborted the
connection, and feel no need to tell the user what had gone wrong!
Similarly, if a password provided on the command line was not
accepted, then (after I fixed the semantics of that in the previous
commit) the same wrong handling would occur.
So now, those Seat prompt functions too can communicate whether the
user or the software originated a connection abort. And in the latter
case, we also provide an error message to present to the user. Result:
in those two example cases (and others), error messages should no
longer go missing.
Implementation note: to avoid the hassle of having the error message
in a SeatPromptResult being a dynamically allocated string (and hence,
every recipient of one must always check whether it's non-NULL and
free it on every exit path, plus being careful about copying the
struct around), I've instead arranged that the structure contains a
function pointer and a couple of parameters, so that the string form
of the message can be constructed on demand. That way, the only users
who need to free it are the ones who actually _asked_ for it in the
first place, which is a much smaller set.
(This is one of the rare occasions that I regret not having C++'s
extra features available in this code base - a unique_ptr or
shared_ptr to a string would have been just the thing here, and the
compiler would have done all the hard work for me of remembering where
to insert the frees!)
2021-12-28 17:52:00 +00:00
|
|
|
p->spr = SPR_INCOMPLETE;
|
2023-03-04 13:37:13 +00:00
|
|
|
term->userpass_utf8_override = p->utf8 && terminal_use_utf8;
|
2019-09-08 19:29:00 +00:00
|
|
|
/* We only print the `name' caption if we have to... */
|
|
|
|
if (p->name_reqd && p->name) {
|
2019-03-09 15:51:38 +00:00
|
|
|
ptrlen plname = ptrlen_from_asciz(p->name);
|
|
|
|
term_write(term, plname);
|
|
|
|
if (!ptrlen_endswith(plname, PTRLEN_LITERAL("\n"), NULL))
|
|
|
|
term_write(term, PTRLEN_LITERAL("\r\n"));
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
/* ...but we always print any `instruction'. */
|
|
|
|
if (p->instruction) {
|
2019-03-09 15:51:38 +00:00
|
|
|
ptrlen plinst = ptrlen_from_asciz(p->instruction);
|
|
|
|
term_write(term, plinst);
|
|
|
|
if (!ptrlen_endswith(plinst, PTRLEN_LITERAL("\n"), NULL))
|
|
|
|
term_write(term, PTRLEN_LITERAL("\r\n"));
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Zero all the results, in case we abort half-way through.
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < (int)p->n_prompts; i++)
|
2011-10-02 11:50:45 +00:00
|
|
|
prompt_set_result(p->prompts[i], "");
|
2019-09-08 19:29:00 +00:00
|
|
|
}
|
New centralised version of local line editing.
This takes over from both the implementation in ldisc.c and the one in
term_get_userpass_input, which were imperfectly duplicating each
other's functionality. The new version should be more consistent
between the two already, and also, it means further improvements can
now be made in just one place.
In the course of this, I've restructured the inside of ldisc.c by
moving the input_queue bufchain to the other side of the translation
code in ldisc_send. Previously, ldisc_send received a string, an
optional 'dedicated key' indication (bodgily signalled by a negative
length) and an 'interactive' flag, translated that somehow into a
combination of raw backend output and specials, and saved the latter
in input_queue. Now it saves the original (string, dedicated flag,
interactive flag) data in input_queue, and doesn't do the translation
until the data is pulled back _out_ of the queue. That's because the
new line editing system expects to receive something much closer to
the original data format.
The term_get_userpass_input system is also substantially restructured.
Instead of ldisc.c handing each individual keystroke to terminal.c so
that it can do line editing on it, terminal.c now just gives the Ldisc
a pointer to its instance of the new TermLineEditor object - and then
ldisc.c can put keystrokes straight into that, in the same way it
would put them into its own TermLineEditor, without having to go via
terminal.c at all. So the term_get_userpass_input edifice is only
called back when the line editor actually delivers the answer to a
username or password prompt.
(I considered not _even_ having a separate TermLineEditor for password
prompts, and just letting ldisc.c use its own. But the problem is that
some of the behaviour differences between the two line editors are
deliberate, for example the use of ^D to signal 'abort this prompt',
and the use of Escape as an alternative line-clearing command. So
TermLineEditor has a flags word that allows ldisc and terminal to set
it up differently. Also this lets me give the two TermLineEditors a
different vtable of callback functions, which is a convenient way for
terminal.c to get notified when a prompt has been answered.)
The new line editor still passes all the tests I wrote for the old
one. But it already has a couple of important improvements, both in
the area of UTF-8 handling:
Firstly, when we display a UTF-8 character on the terminal, we check
with the terminal how many character cells it occupied, and then if
the user deletes it again from the editing buffer, we can emit the
right number of backspace-space-backspace sequences. (The old ldisc
line editor incorrectly assumed all Unicode characters had terminal
with 1, partly because its buffer was byte- rather than character-
oriented and so it was more than enough work just finding where the
character _start_ was.)
Secondly, terminal.c's userpass line editor would never emit a byte in
the 80-BF range to the terminal at all, which meant that nontrivial
UTF-8 characters always came out as U+FFFD blobs!
2023-03-04 12:56:01 +00:00
|
|
|
/* And print the first prompt. */
|
|
|
|
term_userpass_next_prompt(s);
|
2005-10-30 20:24:09 +00:00
|
|
|
}
|
|
|
|
|
New centralised version of local line editing.
This takes over from both the implementation in ldisc.c and the one in
term_get_userpass_input, which were imperfectly duplicating each
other's functionality. The new version should be more consistent
between the two already, and also, it means further improvements can
now be made in just one place.
In the course of this, I've restructured the inside of ldisc.c by
moving the input_queue bufchain to the other side of the translation
code in ldisc_send. Previously, ldisc_send received a string, an
optional 'dedicated key' indication (bodgily signalled by a negative
length) and an 'interactive' flag, translated that somehow into a
combination of raw backend output and specials, and saved the latter
in input_queue. Now it saves the original (string, dedicated flag,
interactive flag) data in input_queue, and doesn't do the translation
until the data is pulled back _out_ of the queue. That's because the
new line editing system expects to receive something much closer to
the original data format.
The term_get_userpass_input system is also substantially restructured.
Instead of ldisc.c handing each individual keystroke to terminal.c so
that it can do line editing on it, terminal.c now just gives the Ldisc
a pointer to its instance of the new TermLineEditor object - and then
ldisc.c can put keystrokes straight into that, in the same way it
would put them into its own TermLineEditor, without having to go via
terminal.c at all. So the term_get_userpass_input edifice is only
called back when the line editor actually delivers the answer to a
username or password prompt.
(I considered not _even_ having a separate TermLineEditor for password
prompts, and just letting ldisc.c use its own. But the problem is that
some of the behaviour differences between the two line editors are
deliberate, for example the use of ^D to signal 'abort this prompt',
and the use of Escape as an alternative line-clearing command. So
TermLineEditor has a flags word that allows ldisc and terminal to set
it up differently. Also this lets me give the two TermLineEditors a
different vtable of callback functions, which is a convenient way for
terminal.c to get notified when a prompt has been answered.)
The new line editor still passes all the tests I wrote for the old
one. But it already has a couple of important improvements, both in
the area of UTF-8 handling:
Firstly, when we display a UTF-8 character on the terminal, we check
with the terminal how many character cells it occupied, and then if
the user deletes it again from the editing buffer, we can emit the
right number of backspace-space-backspace sequences. (The old ldisc
line editor incorrectly assumed all Unicode characters had terminal
with 1, partly because its buffer was byte- rather than character-
oriented and so it was more than enough work just finding where the
character _start_ was.)
Secondly, terminal.c's userpass line editor would never emit a byte in
the 80-BF range to the terminal at all, which meant that nontrivial
UTF-8 characters always came out as U+FFFD blobs!
2023-03-04 12:56:01 +00:00
|
|
|
return SPR_INCOMPLETE;
|
2005-10-30 20:24:09 +00:00
|
|
|
}
|
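
/* Called by the front end to tell us whether the window is minimised. */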
void term_notify_minimised(Terminal *term, bool minimised)
{
    term->minimised = minimised;
}
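
/*
 * Called by the front end when external inputs to the palette (such
 * as the platform's system colours) may have changed, so that the
 * terminal can reset and rebuild its palette.
 */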
void term_notify_palette_changed(Terminal *term)
{
    palette_reset(term, true);
}
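
/*
 * Called by the front end to keep the terminal informed of the
 * window's position and its size in pixels.
 */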
void term_notify_window_pos(Terminal *term, int x, int y)
{
    term->winpos_x = x;
    term->winpos_y = y;
}

void term_notify_window_size_pixels(Terminal *term, int x, int y)
{
    term->winpixsize_x = x;
    term->winpixsize_y = y;
}