2002-10-09 18:09:42 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <ctype.h>
|
2002-12-31 12:20:34 +00:00
|
|
|
#include <locale.h>
|
|
|
|
#include <limits.h>
|
|
|
|
#include <wchar.h>
|
2002-10-09 18:09:42 +00:00
|
|
|
|
|
|
|
#include <time.h>
|
2002-12-31 12:20:34 +00:00
|
|
|
|
2002-10-09 18:09:42 +00:00
|
|
|
#include "putty.h"
|
2003-04-05 16:36:11 +00:00
|
|
|
#include "charset.h"
|
2002-10-22 16:11:33 +00:00
|
|
|
#include "terminal.h"
|
2002-10-09 18:09:42 +00:00
|
|
|
#include "misc.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unix Unicode-handling routines.
|
|
|
|
*/
|
|
|
|
|
Convert a lot of 'int' variables to 'bool'.
My normal habit these days, in new code, is to treat int and bool as
_almost_ completely separate types. I'm still willing to use C's
implicit test for zero on an integer (e.g. 'if (!blob.len)' is fine,
no need to spell it out as blob.len != 0), but generally, if a
variable is going to be conceptually a boolean, I like to declare it
bool and assign to it using 'true' or 'false' rather than 0 or 1.
PuTTY is an exception, because it predates the C99 bool, and I've
stuck to its existing coding style even when adding new code to it.
But it's been annoying me more and more, so now that I've decided C99
bool is an acceptable thing to require from our toolchain in the first
place, here's a quite thorough trawl through the source doing
'boolification'. Many variables and function parameters are now typed
as bool rather than int; many assignments of 0 or 1 to those variables
are now spelled 'true' or 'false'.
I managed this thorough conversion with the help of a custom clang
plugin that I wrote to trawl the AST and apply heuristics to point out
where things might want changing. So I've even managed to do a decent
job on parts of the code I haven't looked at in years!
To make the plugin's work easier, I pushed platform front ends
generally in the direction of using standard 'bool' in preference to
platform-specific boolean types like Windows BOOL or GTK's gboolean;
I've left the platform booleans in places they _have_ to be for the
platform APIs to work right, but variables only used by my own code
have been converted wherever I found them.
In a few places there are int values that look very like booleans in
_most_ of the places they're used, but have a rarely-used third value,
or a distinction between different nonzero values that most users
don't care about. In these cases, I've _removed_ uses of 'true' and
'false' for the return values, to emphasise that there's something
more subtle going on than a simple boolean answer:
- the 'multisel' field in dialog.h's list box structure, for which
the GTK front end in particular recognises a difference between 1
and 2 but nearly everything else treats as boolean
- the 'urgent' parameter to plug_receive, where 1 vs 2 tells you
something about the specific location of the urgent pointer, but
most clients only care about 0 vs 'something nonzero'
- the return value of wc_match, where -1 indicates a syntax error in
the wildcard.
- the return values from SSH-1 RSA-key loading functions, which use
-1 for 'wrong passphrase' and 0 for all other failures (so any
caller which already knows it's not loading an _encrypted private_
key can treat them as boolean)
- term->esc_query, and the 'query' parameter in toggle_mode in
terminal.c, which _usually_ hold 0 for ESC[123h or 1 for ESC[?123h,
but can also hold -1 for some other intervening character that we
don't support.
In a few places there's an integer that I haven't turned into a bool
even though it really _can_ only take values 0 or 1 (and, as above,
tried to make the call sites consistent in not calling those values
true and false), on the grounds that I thought it would make it more
confusing to imply that the 0 value was in some sense 'negative' or
bad and the 1 positive or good:
- the return value of plug_accepting uses the POSIXish convention of
0=success and nonzero=error; I think if I made it bool then I'd
also want to reverse its sense, and that's a job for a separate
piece of work.
- the 'screen' parameter to lineptr() in terminal.c, where 0 and 1
represent the default and alternate screens. There's no obvious
reason why one of those should be considered 'true' or 'positive'
or 'success' - they're just indices - so I've left it as int.
ssh_scp_recv had particularly confusing semantics for its previous int
return value: its call sites used '<= 0' to check for error, but it
never actually returned a negative number, just 0 or 1. Now the
function and its call sites agree that it's a bool.
In a couple of places I've renamed variables called 'ret', because I
don't like that name any more - it's unclear whether it means the
return value (in preparation) for the _containing_ function or the
return value received from a subroutine call, and occasionally I've
accidentally used the same variable for both and introduced a bug. So
where one of those got in my way, I've renamed it to 'toret' or 'retd'
(the latter short for 'returned') in line with my usual modern
practice, but I haven't done a thorough job of finding all of them.
Finally, one amusing side effect of doing this is that I've had to
separate quite a few chained assignments. It used to be perfectly fine
to write 'a = b = c = TRUE' when a,b,c were int and TRUE was just a
macro for 1; but now that those variables are bool and TRUE is
the 'true' defined by stdbool.h, that idiom provokes a warning from
gcc: 'suggest parentheses around assignment used as truth value'!
2018-11-02 19:23:19 +00:00
|
|
|
bool is_dbcs_leadbyte(int codepage, char byte)
{
    /*
     * On Unix we support no double-byte character sets at all, so no
     * byte value can ever begin a DBCS pair, whatever the codepage.
     */
    (void)codepage;                    /* unused */
    (void)byte;                        /* unused */
    return false;
}
|
|
|
|
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
bool BinarySink_put_mb_to_wc(
|
|
|
|
BinarySink *bs, int codepage, const char *mbstr, int mblen)
|
2002-10-09 18:09:42 +00:00
|
|
|
{
|
2002-12-31 12:20:34 +00:00
|
|
|
if (codepage == DEFAULT_CODEPAGE) {
|
2019-09-08 19:29:00 +00:00
|
|
|
mbstate_t state;
|
2002-12-31 12:20:34 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
memset(&state, 0, sizeof state);
|
2002-12-31 12:20:34 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
while (mblen > 0) {
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
wchar_t wc;
|
|
|
|
size_t i = mbrtowc(&wc, mbstr, (size_t)mblen, &state);
|
2019-09-08 19:29:00 +00:00
|
|
|
if (i == (size_t)-1 || i == (size_t)-2)
|
|
|
|
break;
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
put_data(bs, &wc, sizeof(wc));
|
2019-09-08 19:29:00 +00:00
|
|
|
mbstr += i;
|
|
|
|
mblen -= i;
|
|
|
|
}
|
2003-01-01 22:25:25 +00:00
|
|
|
} else if (codepage == CS_NONE) {
|
2019-09-08 19:29:00 +00:00
|
|
|
while (mblen > 0) {
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
wchar_t wc = 0xD800 | (mbstr[0] & 0xFF);
|
|
|
|
put_data(bs, &wc, sizeof(wc));
|
2019-09-08 19:29:00 +00:00
|
|
|
mbstr++;
|
|
|
|
mblen--;
|
|
|
|
}
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
} else {
|
|
|
|
wchar_t wbuf[1024];
|
|
|
|
while (mblen > 0) {
|
|
|
|
int wlen = charset_to_unicode(&mbstr, &mblen, wbuf, lenof(wbuf),
|
|
|
|
codepage, NULL, NULL, 0);
|
|
|
|
put_data(bs, wbuf, wlen * sizeof(wchar_t));
|
|
|
|
}
|
|
|
|
}
|
2003-01-01 22:25:25 +00:00
|
|
|
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
/* We never expect to receive invalid charset values on Unix,
|
|
|
|
* because we're not dependent on an externally defined space of
|
|
|
|
* OS-provided code pages */
|
|
|
|
return true;
|
2002-10-13 11:24:25 +00:00
|
|
|
}
|
|
|
|
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
bool BinarySink_put_wc_to_mb(
|
|
|
|
BinarySink *bs, int codepage, const wchar_t *wcstr, int wclen,
|
|
|
|
const char *defchr)
|
2002-10-13 11:24:25 +00:00
|
|
|
{
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
size_t defchr_len = 0;
|
|
|
|
bool defchr_len_known = false;
|
|
|
|
|
2002-12-31 12:20:34 +00:00
|
|
|
if (codepage == DEFAULT_CODEPAGE) {
|
2019-09-08 19:29:00 +00:00
|
|
|
char output[MB_LEN_MAX];
|
|
|
|
mbstate_t state;
|
2002-12-31 12:20:34 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
memset(&state, 0, sizeof state);
|
2002-12-31 12:20:34 +00:00
|
|
|
|
2019-09-08 19:29:00 +00:00
|
|
|
while (wclen > 0) {
|
2020-05-16 15:14:13 +00:00
|
|
|
size_t i = wcrtomb(output, wcstr[0], &state);
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
if (i == (size_t)-1) {
|
|
|
|
if (!defchr_len_known) {
|
|
|
|
defchr_len = strlen(defchr);
|
|
|
|
defchr_len_known = true;
|
|
|
|
}
|
|
|
|
put_data(bs, defchr, defchr_len);
|
|
|
|
} else {
|
|
|
|
put_data(bs, output, i);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
wcstr++;
|
|
|
|
wclen--;
|
|
|
|
}
|
2003-01-01 22:25:25 +00:00
|
|
|
} else if (codepage == CS_NONE) {
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
while (wclen > 0) {
|
|
|
|
if (*wcstr >= 0xD800 && *wcstr < 0xD900) {
|
|
|
|
put_byte(bs, *wcstr & 0xFF);
|
|
|
|
} else {
|
|
|
|
if (!defchr_len_known) {
|
|
|
|
defchr_len = strlen(defchr);
|
|
|
|
defchr_len_known = true;
|
|
|
|
}
|
|
|
|
put_data(bs, defchr, defchr_len);
|
|
|
|
}
|
2019-09-08 19:29:00 +00:00
|
|
|
wcstr++;
|
|
|
|
wclen--;
|
|
|
|
}
|
2003-01-01 22:25:25 +00:00
|
|
|
} else {
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
char buf[2048];
|
|
|
|
defchr_len = strlen(defchr);
|
|
|
|
|
|
|
|
while (wclen > 0) {
|
|
|
|
int len = charset_from_unicode(
|
|
|
|
&wcstr, &wclen, buf, lenof(buf), codepage,
|
|
|
|
NULL, defchr, defchr_len);
|
|
|
|
put_data(bs, buf, len);
|
|
|
|
}
|
2003-01-01 22:25:25 +00:00
|
|
|
}
|
Rework Unicode conversion APIs to use a BinarySink.
The previous mb_to_wc and wc_to_mb had horrible and also buggy APIs.
This commit introduces a fresh pair of functions to replace them,
which generate output by writing to a BinarySink. So it's now up to
the caller to decide whether it wants the output written to a
fixed-size buffer with overflow checking (via buffer_sink), or
dynamically allocated, or even written directly to some other output
channel.
Nothing uses the new functions yet. I plan to migrate things over in
upcoming commits.
What was wrong with the old APIs: they had that awkward undocumented
Windows-specific 'flags' parameter that I described in the previous
commit and took out of the dup_X_to_Y wrappers. But much worse, the
semantics for buffer overflow were not just undocumented but actually
inconsistent. dup_wc_to_mb() in utils assumed that the underlying
wc_to_mb would fill the buffer nearly full and return the size of data
it wrote. In fact, this was untrue in the case where wc_to_mb called
WideCharToMultiByte: that returns straight-up failure, setting the
Windows error code to ERROR_INSUFFICIENT_BUFFER. It _does_ partially
fill the output buffer, but doesn't tell you how much it wrote!
What's wrong with the new API: it's a bit awkward to write a sequence
of wchar_t in native byte order to a byte-oriented BinarySink, so
people using put_mb_to_wc directly have to do some annoying pointer
casting. But I think that's less horrible than the previous APIs.
Another change: in the new API for wc_to_mb, defchr can be "", but not
NULL.
2024-09-24 07:18:48 +00:00
|
|
|
|
|
|
|
return true;
|
2002-10-09 18:09:42 +00:00
|
|
|
}
|
|
|
|
|
2003-01-02 16:17:56 +00:00
|
|
|
/*
 * Set up all the character-set translation tables for a terminal
 * session: decide the line character set, then build unitab_line,
 * unitab_xterm, unitab_scoacs and unitab_ctrl.
 *
 * Return value is true if pterm is to run in direct-to-font mode.
 */
bool init_ucs(struct unicode_data *ucsdata, char *linecharset,
              bool utf8_override, int font_charset, int vtmode)
{
    int i;
    bool ret = false;

    /*
     * In the platform-independent parts of the code, font_codepage
     * is used only for system DBCS support - which we don't
     * support at all. So we set this to something which will never
     * be used.
     */
    ucsdata->font_codepage = -1;

    /*
     * If utf8_override is set and the POSIX locale settings
     * dictate a UTF-8 character set, then just go straight for
     * UTF-8.
     */
    ucsdata->line_codepage = CS_NONE;
    if (utf8_override) {
        const char *s;
        /* Standard POSIX precedence: LC_ALL, then LC_CTYPE, then LANG. */
        if (((s = getenv("LC_ALL")) && *s) ||
            ((s = getenv("LC_CTYPE")) && *s) ||
            ((s = getenv("LANG")) && *s)) {
            if (strstr(s, "UTF-8"))
                ucsdata->line_codepage = CS_UTF8;
        }
    }

    /*
     * Failing that, line_codepage should be decoded from the
     * specification in conf.
     */
    if (ucsdata->line_codepage == CS_NONE)
        ucsdata->line_codepage = decode_codepage(linecharset);

    /*
     * If line_codepage is _still_ CS_NONE, we assume we're using
     * the font's own encoding. This has been passed in to us, so
     * we use that. If it's still CS_NONE after _that_ - i.e. the
     * font we were given had an incomprehensible charset - then we
     * fall back to using the D800 page.
     */
    if (ucsdata->line_codepage == CS_NONE)
        ucsdata->line_codepage = font_charset;

    /* No usable charset at all: report direct-to-font mode to caller. */
    if (ucsdata->line_codepage == CS_NONE)
        ret = true;

    /*
     * Set up unitab_line, by translating each individual character
     * in the line codepage into Unicode.
     */
    for (i = 0; i < 256; i++) {
        char c[1];
        const char *p;
        wchar_t wc[1];
        int len;
        c[0] = i;
        p = c;
        len = 1;
        if (ucsdata->line_codepage == CS_NONE)
            ucsdata->unitab_line[i] = 0xD800 | i;  /* direct-to-font hack */
        else if (1 == charset_to_unicode(&p, &len, wc, 1,
                                         ucsdata->line_codepage,
                                         NULL, L"", 0))
            ucsdata->unitab_line[i] = wc[0];
        else
            ucsdata->unitab_line[i] = 0xFFDD;
    }

    /*
     * Set up unitab_xterm. This is the same as unitab_line except
     * in the line-drawing regions, where it follows the Unicode
     * encoding.
     *
     * (Note that the strange X encoding of line-drawing characters
     * in the bottom 32 glyphs of ISO8859-1 fonts is taken care of
     * by the font encoding, which will spot such a font and act as
     * if it were in a variant encoding of ISO8859-1.)
     */
    for (i = 0; i < 256; i++) {
        /* Unicode Box Drawing / control-picture equivalents of the
         * VT100 line-drawing glyph set. */
        static const wchar_t unitab_xterm_std[32] = {
            0x2666, 0x2592, 0x2409, 0x240c, 0x240d, 0x240a, 0x00b0, 0x00b1,
            0x2424, 0x240b, 0x2518, 0x2510, 0x250c, 0x2514, 0x253c, 0x23ba,
            0x23bb, 0x2500, 0x23bc, 0x23bd, 0x251c, 0x2524, 0x2534, 0x252c,
            0x2502, 0x2264, 0x2265, 0x03c0, 0x2260, 0x00a3, 0x00b7, 0x0020
        };
        /* ASCII-only approximations, for the "poor man's" VT mode. */
        static const wchar_t unitab_xterm_poorman[32] =
            L"*#****o~**+++++-----++++|****L. ";

        const wchar_t *ptr;

        if (vtmode == VT_POORMAN)
            ptr = unitab_xterm_poorman;
        else
            ptr = unitab_xterm_std;

        /* 0x5F-0x7E is the line-drawing range in the xterm charset. */
        if (i >= 0x5F && i < 0x7F)
            ucsdata->unitab_xterm[i] = ptr[i & 0x1F];
        else
            ucsdata->unitab_xterm[i] = ucsdata->unitab_line[i];
    }

    /*
     * Set up unitab_scoacs. The SCO Alternate Character Set is
     * simply CP437.
     */
    for (i = 0; i < 256; i++) {
        char c[1];
        const char *p;
        wchar_t wc[1];
        int len;
        c[0] = i;
        p = c;
        len = 1;
        if (1 == charset_to_unicode(&p, &len, wc, 1, CS_CP437, NULL, L"", 0))
            ucsdata->unitab_scoacs[i] = wc[0];
        else
            ucsdata->unitab_scoacs[i] = 0xFFFD;
    }

    /*
     * Find the control characters in the line codepage. For
     * direct-to-font mode using the D800 hack, we assume 00-1F and
     * 7F are controls, but allow 80-9F through. (It's as good a
     * guess as anything; and my bet is that half the weird fonts
     * used in this way will be IBM or MS code pages anyway.)
     */
    for (i = 0; i < 256; i++) {
        int lineval = ucsdata->unitab_line[i];
        if (lineval < ' ' || (lineval >= 0x7F && lineval < 0xA0) ||
            (lineval >= 0xD800 && lineval < 0xD820) || (lineval == 0xD87F))
            ucsdata->unitab_ctrl[i] = i;       /* marks i as a control */
        else
            ucsdata->unitab_ctrl[i] = 0xFF;    /* not a control */
    }

    return ret;
}
|
2003-04-05 16:36:11 +00:00
|
|
|
|
2023-02-18 14:10:01 +00:00
|
|
|
/*
 * Convenience wrapper: initialise Unicode data straight from a Conf,
 * with no font charset available (CS_NONE).
 */
void init_ucs_generic(Conf *conf, struct unicode_data *ucsdata)
{
    char *linecharset = conf_get_str(conf, CONF_line_codepage);
    bool utf8_override = conf_get_bool(conf, CONF_utf8_override);
    int vtmode = conf_get_int(conf, CONF_vtmode);

    init_ucs(ucsdata, linecharset, utf8_override, CS_NONE, vtmode);
}
|
|
|
|
|
2003-04-05 16:36:11 +00:00
|
|
|
const char *cp_name(int codepage)
|
|
|
|
{
|
|
|
|
if (codepage == CS_NONE)
|
2019-09-08 19:29:00 +00:00
|
|
|
return "Use font encoding";
|
2003-04-05 16:36:11 +00:00
|
|
|
return charset_to_localenc(codepage);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *cp_enumerate(int index)
|
|
|
|
{
|
|
|
|
int charset;
|
2013-05-25 14:03:19 +00:00
|
|
|
charset = charset_localenc_nth(index);
|
|
|
|
if (charset == CS_NONE) {
|
|
|
|
/* "Use font encoding" comes after all the named charsets */
|
|
|
|
if (charset_localenc_nth(index-1) != CS_NONE)
|
|
|
|
return "Use font encoding";
|
2019-09-08 19:29:00 +00:00
|
|
|
return NULL;
|
2013-05-25 14:03:19 +00:00
|
|
|
}
|
2003-04-05 16:36:11 +00:00
|
|
|
return charset_to_localenc(charset);
|
|
|
|
}
|
|
|
|
|
2022-06-01 07:29:29 +00:00
|
|
|
int decode_codepage(const char *cp_name)
|
2003-04-05 16:36:11 +00:00
|
|
|
{
|
2013-07-22 07:12:05 +00:00
|
|
|
if (!cp_name || !*cp_name)
|
2019-09-08 19:29:00 +00:00
|
|
|
return CS_UTF8;
|
2003-04-05 16:36:11 +00:00
|
|
|
return charset_from_localenc(cp_name);
|
|
|
|
}
|