putty-source/Buildscr

# -*- sh -*-
# Build script to construct a full distribution directory of PuTTY.
module putty
# Start by figuring out what our version information looks like.
#
# There are four classes of PuTTY build:
# - a release, which just has an X.YY version number
# - a prerelease, which has an X.YY version number, plus a date and
#   version control commit id (and is considered to be 'almost'
#   version X.YY)
# - a development snapshot, which just has a date and commit id
# - a custom build, which also has a date and commit id (but is
#   labelled differently, to stress that development snapshots are
#   built from the checked-in code by the automated nightly script
#   whereas custom builds are made manually, perhaps from uncommitted
#   changes, e.g. to send to a user for diagnostic or testing
#   purposes).
#
# The four classes of build are triggered by invoking bob with
# different command-line variable definitions:
#
# - RELEASE=X.YY makes a release build
# - PRERELEASE=X.YY makes a prerelease build (combined with the build
#   date and VCS info)
# - setting SNAPSHOT to any non-empty string makes a development
#   snapshot
# - setting none of these makes a custom build.
# If we need a date for our build, start by computing it in the
# various forms we need. $(Ndate) is the date in purely numeric form
# (YYYYMMDD); $(Date) is separated as YYYY-MM-DD; $(Days) is the
# number of days since the epoch.
ifeq "$(RELEASE)" "" set Ndate $(!builddate)
ifneq "$(Ndate)" "" in . do echo $(Ndate) | perl -pe 's/(....)(..)(..)/$$1-$$2-$$3/' > date
ifneq "$(Ndate)" "" read Date date
set Epoch 19052 # update this at every release
ifneq "$(Ndate)" "" in . do echo $(Ndate) | perl -ne 'use Time::Local; /(....)(..)(..)/ and print timegm(0,0,0,$$3,$$2-1,$$1) / 86400 - $(Epoch)' > days
ifneq "$(Ndate)" "" read Days days
# For any non-release, we're going to need the number of the prior
# release, for putting in various places so as to get monotonic
# comparisons with the surrounding actual releases.
ifeq "$(RELEASE)" "" read Lastver putty/LATEST.VER
# Set up the textual version strings for the docs build and installers.
# We have one of these including the word 'PuTTY', and one without,
# which are inconveniently capitalised differently.
ifneq "$(RELEASE)" "" set Puttytextver PuTTY release $(RELEASE)
ifneq "$(RELEASE)" "" set Textver Release $(RELEASE)
ifneq "$(PRERELEASE)" "" set Puttytextver PuTTY pre-release $(PRERELEASE):$(Date).$(vcsid)
ifneq "$(PRERELEASE)" "" set Textver Pre-release $(PRERELEASE):$(Date).$(vcsid)
ifneq "$(SNAPSHOT)" "" set Puttytextver PuTTY development snapshot $(Date).$(vcsid)
ifneq "$(SNAPSHOT)" "" set Textver Development snapshot $(Date).$(vcsid)
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" set Puttytextver PuTTY custom build $(Date).$(vcsid)
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" set Textver Custom build $(Date).$(vcsid)
in putty/doc do echo "\\\\versionid $(Puttytextver)" > version.but
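# (For illustration: with RELEASE=0.81, the line above is intended to
# leave doc/version.but containing a single Halibut directive of the
# form "\versionid PuTTY release 0.81".)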
# Set up the version string for use in the SSH connection greeting.
#
# We use $(Ndate) rather than $(Date) in the pre-release string to
# make sure it's under 40 characters, which is a hard limit in the SSH
# protocol spec (and enforced by a compile-time assertion in
# version.c).
ifneq "$(RELEASE)" "" set Sshver -Release-$(RELEASE)
ifneq "$(PRERELEASE)" "" set Sshver -Prerelease-$(PRERELEASE):$(Ndate).$(vcsid)
ifneq "$(SNAPSHOT)" "" set Sshver -Snapshot-$(Date).$(vcsid)
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" set Sshver -Custom-$(Date).$(vcsid)
# Set up the filename suffix for the Unix source archive.
ifneq "$(RELEASE)" "" set Uxarcsuffix -$(RELEASE)
ifneq "$(PRERELEASE)" "" set Uxarcsuffix -$(PRERELEASE)~pre$(Ndate).$(vcsid)
ifneq "$(SNAPSHOT)" "" set Uxarcsuffix -$(Lastver)-$(Date).$(vcsid)
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" set Uxarcsuffix -custom-$(Date).$(vcsid)
# Set up the filenames for the Windows installers (minus extension,
# which goes on later).
ifneq "$(RELEASE)" "" set Isuffix $(RELEASE)-installer
ifneq "$(PRERELEASE)" "" set Isuffix $(PRERELEASE)-pre$(Ndate)-installer
ifneq "$(SNAPSHOT)" "" set Isuffix $(Date)-installer
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" set Isuffix custom-$(Date)-installer
set Ifilename32 putty-$(Isuffix)
set Ifilename64 putty-64bit-$(Isuffix)
set Ifilenamea32 putty-arm32-$(Isuffix)
set Ifilenamea64 putty-arm64-$(Isuffix)
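# (Illustrative example: RELEASE=0.81 gives Isuffix 0.81-installer and
# hence installer basenames putty-0.81-installer,
# putty-64bit-0.81-installer, putty-arm32-0.81-installer and
# putty-arm64-0.81-installer, with the extension added further down.)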
# Set up the Windows version resource info, for both the installers and
# the individual programs. This must be a sequence of four 16-bit
# integers compared lexicographically, and we define it as follows:
#
# For release X.YY: X.YY.0.0
# For a prerelease before the X.YY release: (X.YY-1).(DDDDD + 0x8000).0
# For a devel snapshot after the X.YY release: X.YY.DDDDD.0
# For a custom build: X.YY.DDDDD.1
#
# where DDDDD is a representation of the build date, in the form of a
# number of days since an epoch date. The epoch is reset at every
# release (which, with 15 bits, gives us a comfortable 80-odd years
# before it becomes vital to make another release to reset the count
# :-).
ifneq "$(RELEASE)" "" in . do echo $(RELEASE).0.0 > winver
ifneq "$(PRERELEASE)" "" in . do perl -e 'printf "%s.%d.0", $$ARGV[0], 0x8000+$$ARGV[1]' $(Lastver) $(Days) > winver
ifneq "$(SNAPSHOT)" "" in . do perl -e 'printf "%s.%d.0", $$ARGV[0], $$ARGV[1]' $(Lastver) $(Days) > winver
ifeq "$(RELEASE)$(PRERELEASE)$(SNAPSHOT)" "" in . do perl -e 'printf "%s.%d.1", $$ARGV[0], $$ARGV[1]' $(Lastver) $(Days) > winver
in . do perl -pe 'y!.!,!' winver > winvercommas
read Winver winver
read Winvercommas winvercommas
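# Worked example (made-up values): with Lastver=0.81 and Days=685, a
# development snapshot gets Winver 0.81.685.0, a prerelease gets
# 0.81.33453.0 (33453 = 0x8000 + 685), a custom build gets 0.81.685.1,
# and a release of 0.82 would simply get 0.82.0.0. Winvercommas is the
# same value with dots turned into commas, e.g. 0,81,685,0.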
# Write out a version.h that contains the real version number.
in putty do echo '/* Generated by automated build script */' > version.h
ifneq "$(RELEASE)" "" in putty do echo '$#define RELEASE $(RELEASE)' >> version.h
ifneq "$(PRERELEASE)" "" in putty do echo '$#define PRERELEASE $(PRERELEASE)' >> version.h
ifneq "$(SNAPSHOT)" "" in putty do echo '$#define SNAPSHOT' >> version.h
in putty do echo '$#define TEXTVER "$(Textver)"' >> version.h
in putty do echo '$#define SSHVER "$(Sshver)"' >> version.h
in putty do echo '$#define BINARY_VERSION $(Winvercommas)' >> version.h
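# For illustration: $# appears to be bob's escape for a literal #, so a
# snapshot build with the example values used above would leave
# version.h reading roughly:
#   /* Generated by automated build script */
#   #define SNAPSHOT
#   #define TEXTVER "Development snapshot 2024-01-15.1234567"
#   #define SSHVER "-Snapshot-2024-01-15.1234567"
#   #define BINARY_VERSION 0,81,685,0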
# In cmake/gitcommit.cmake, replace the default output "unavailable"
# with the commit string generated by bob, so that people rebuilding
# the source archive will still get a useful value.
in putty do sed -i '/set(DEFAULT_COMMIT/s/unavailable/$(vcsfullid)/' cmake/gitcommit.cmake
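# (Illustrative effect of that sed, assuming gitcommit.cmake contains a
# line of roughly the form  set(DEFAULT_COMMIT "unavailable"):  after
# this step it would read  set(DEFAULT_COMMIT "<full commit hash>"),
# with bob having substituted $(vcsfullid) into the command.)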
in . do mkdir docbuild
in docbuild do cmake ../putty/doc
in docbuild do make -j$(nproc) VERBOSE=1
in putty/doc do cp ../../docbuild/*.1 .
in putty/doc do cp ../../docbuild/puttydoc.txt .
in putty/doc do cp ../../docbuild/putty.chm .
in putty/doc do cp -r ../../docbuild/html .
in putty do ./mksrcarc.sh
in putty do ./mkunxarc.sh '$(Uxarcsuffix)'
delegate -
# Run the test suite, under self-delegation so that we don't leave any
# cruft lying around. This involves doing a build of the Unix tools
# (which is a useful double-check anyway to pick up any build failures)
in putty do cmake . -DCMAKE_C_COMPILER=clang -DCMAKE_C_FLAGS="-fsanitize=address -fsanitize=leak" -DSTRICT=ON
in putty do make -j$(nproc) VERBOSE=1
in putty do python3 test/cryptsuite.py
in putty do ./test_lineedit
in putty do ./test_terminal
in putty do ./test_conf
enddelegate
delegate -
# Also, test-build the Windows tools using MinGW. This is important if
# we want the MinGW build to carry on working, partly because of the
# chance of compiler compatibility issues, but mostly because MinGW's
# linker uses Unix-style library search semantics (once down the
# library list), and no other Windows toolchain we build with is that
# picky. So this ensures the Windows library structure continues to
# work in the most difficult circumstance we expect it to encounter.
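# (Concretely, with single-pass search a link line such as
#   cc main.o -lssh -lutils
# resolves references from libssh into libutils, but an object pulled
# in from libutils that calls back into libssh is left with undefined
# references, because libssh is not rescanned. Library names here are
# illustrative, not necessarily the project's real ones.)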
in putty do cmake . -DCMAKE_TOOLCHAIN_FILE=cmake/toolchain-mingw.cmake -DSTRICT=ON
in putty do make -j$(nproc) VERBOSE=1
enddelegate
# Windowsify LICENCE, since it's going in the Windows installers.
in putty do perl -i~ -pe 'y/\015//d;s/$$/\015/' LICENCE
# Some gratuitous theming for the MSI installer UI.
in putty/icons do make -j$(nproc)
in putty do ./windows/make_install_images.sh
mkdir putty/windows/build32
mkdir putty/windows/build64
mkdir putty/windows/buildold
mkdir putty/windows/abuild32
mkdir putty/windows/abuild64
# Build the binaries to go in the installers, in both 32- and 64-bit
# flavours.
#
# For the 32-bit ones, we set a subsystem version of 5.01, which
# allows the resulting files to still run on Windows XP.
in putty/windows/build32 with cmake_at_least_3.20 do cmake ../.. -DCMAKE_TOOLCHAIN_FILE=$(cmake_toolchain_clangcl32) -DCMAKE_BUILD_TYPE=Release -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DPUTTY_LINK_MAPS=ON -DCMAKE_C_FLAGS_RELEASE="/MT /O2" -DPUTTY_SUBSYSTEM_VERSION=5.01 -DSTRICT=ON
in putty/windows/build32 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
in putty/windows/build64 with cmake_at_least_3.20 do cmake ../.. -DCMAKE_TOOLCHAIN_FILE=$(cmake_toolchain_clangcl64) -DCMAKE_BUILD_TYPE=Release -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DPUTTY_LINK_MAPS=ON -DCMAKE_C_FLAGS_RELEASE="/MT /O2" -DSTRICT=ON
in putty/windows/build64 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
# The cmake mechanism used to set the subsystem version is a bit of a
# bodge (it depends on knowing how cmake set up all its build command
# variables), so just in case it breaks in future, double-check we
# really did get the subsystem version we wanted.
in putty/windows/build32 do objdump -x putty.exe > exe-headers.txt
in putty/windows/build32 do grep -Ex 'MajorOSystemVersion[[:space:]]+5' exe-headers.txt
in putty/windows/build32 do grep -Ex 'MinorOSystemVersion[[:space:]]+1' exe-headers.txt
in putty/windows/build32 do grep -Ex 'MajorSubsystemVersion[[:space:]]+5' exe-headers.txt
in putty/windows/build32 do grep -Ex 'MinorSubsystemVersion[[:space:]]+1' exe-headers.txt
# Build experimental Arm Windows binaries.
in putty/windows/abuild32 with cmake_at_least_3.20 do cmake ../.. -DCMAKE_TOOLCHAIN_FILE=$(cmake_toolchain_clangcl_a32) -DCMAKE_BUILD_TYPE=Release -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DPUTTY_LINK_MAPS=ON -DCMAKE_C_FLAGS_RELEASE="/MT /O2" -DSTRICT=ON
in putty/windows/abuild32 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
in putty/windows/abuild64 with cmake_at_least_3.20 do cmake ../.. -DCMAKE_TOOLCHAIN_FILE=$(cmake_toolchain_clangcl_a64) -DCMAKE_BUILD_TYPE=Release -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DPUTTY_LINK_MAPS=ON -DCMAKE_C_FLAGS_RELEASE="/MT /O2" -DSTRICT=ON
in putty/windows/abuild64 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
# Make a list of the Windows binaries we're going to ship, so that we
# can sign them.
in putty/windows do for subdir in build32 abuild32 build64 abuild64; do sed "s!^!$$subdir/!" $$subdir/shipped.txt; done > to-sign.txt
# Code-sign the Windows binaries, if the local bob config provides a
# script to do so in a cross-compiling way. We assume here that the
# script accepts an -i option to provide a 'more info' URL, an
# optional -n option to provide a program name, and an -N option to
# take the program name from an .exe's version resource, and that it
# can accept multiple .exe or .msi filename arguments and sign them
# all in place.
ifneq "$(cross_winsigncode)" "" in putty/windows do $(cross_winsigncode) -N -i https://www.chiark.greenend.org.uk/~sgtatham/putty/ $$(cat to-sign.txt)
# Make a preliminary set of cryptographic checksums giving the hashes
# of these versions of the binaries. We'll make the rest below.
in putty do for hash in md5 sha1 sha256 sha512; do for dir_plat in "build32 w32" "build64 w64" "abuild32 wa32" "abuild64 wa64"; do set -- $$dir_plat; (cd windows/$$1 && $${hash}sum *.exe | sed 's!\( \+\)!\1'$$2'/!;s!$$! (installer version)!') >> $${hash}sums.installer; done; done
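# (For reference: after the sed above, each line of e.g.
# sha256sums.installer comes out in the form
#   <hash>  w64/putty.exe (installer version)
# i.e. the platform directory is inserted after the whitespace
# following the hash, and a tag is appended so these installer-flavour
# hashes stay distinct from the rest made further down.)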
# Build a WiX MSI installer, for each build flavour.
in putty/windows with wixonlinux do candle -arch x86 -dRealPlatform=x86 -dDllOk=yes -dBuilddir=build32/ -dWinver="$(Winver)" -dPuttytextver="$(Puttytextver)" -dHelpFilePath="../../docbuild/putty.chm" installer.wxs && light -ext WixUIExtension -ext WixUtilExtension -sval installer.wixobj -o installer32.msi -spdb
in putty/windows with wixonlinux do candle -arch x64 -dRealPlatform=x64 -dDllOk=yes -dBuilddir=build64/ -dWinver="$(Winver)" -dPuttytextver="$(Puttytextver)" -dHelpFilePath="../../docbuild/putty.chm" installer.wxs && light -ext WixUIExtension -ext WixUtilExtension -sval installer.wixobj -o installer64.msi -spdb
in putty/windows with wixonlinux do candle -arch x64 -dRealPlatform=Arm -dDllOk=no -dBuilddir=abuild32/ -dWinver="$(Winver)" -dPuttytextver="$(Puttytextver)" -dHelpFilePath="../../docbuild/putty.chm" installer.wxs && light -ext WixUIExtension -ext WixUtilExtension -sval installer.wixobj -o installera32.msi -spdb
in putty/windows with wixonlinux do candle -arch x64 -dRealPlatform=Arm64 -dDllOk=no -dBuilddir=abuild64/ -dWinver="$(Winver)" -dPuttytextver="$(Puttytextver)" -dHelpFilePath="../../docbuild/putty.chm" installer.wxs && light -ext WixUIExtension -ext WixUtilExtension -sval installer.wixobj -o installera64.msi -spdb
# Change the width field for our dialog background image so that it
# doesn't stretch across the whole dialog. (WiX's default one does; we
# replace it with a narrow one so that the text to the right of it
# shows up on the system's default background colour, meaning that
# high-contrast mode doesn't make the text white on white. But that
# means we also have to modify the width field, and there's nothing in
# WiX's source syntax to make that happen.)
#
# Also bodge the platform fields for the Windows on Arm installers,
# since WiX 3 doesn't understand Arm platform names itself.
in putty/windows do ./msifixup.py installer32.msi --dialog-bmp-width=123
in putty/windows do ./msifixup.py installer64.msi --dialog-bmp-width=123
in putty/windows do ./msifixup.py installera32.msi --dialog-bmp-width=123 --platform=Arm
in putty/windows do ./msifixup.py installera64.msi --dialog-bmp-width=123 --platform=Arm64
# Sign the Windows installers.
ifneq "$(cross_winsigncode)" "" in putty/windows do $(cross_winsigncode) -i https://www.chiark.greenend.org.uk/~sgtatham/putty/ -n "PuTTY Installer" installer32.msi installer64.msi installera32.msi installera64.msi
# Build the standalone binaries, in both 32- and 64-bit flavours.
# These differ from the previous set in that they embed the help file.
in putty/windows/build32 with cmake_at_least_3.20 do cmake . -DPUTTY_EMBEDDED_CHM_FILE=$$(realpath ../../../docbuild/putty.chm)
in putty/windows/build32 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
in putty/windows/build64 with cmake_at_least_3.20 do cmake . -DPUTTY_EMBEDDED_CHM_FILE=$$(realpath ../../../docbuild/putty.chm)
in putty/windows/build64 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
in putty/windows/abuild32 with cmake_at_least_3.20 do cmake . -DPUTTY_EMBEDDED_CHM_FILE=$$(realpath ../../../docbuild/putty.chm)
in putty/windows/abuild32 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
in putty/windows/abuild64 with cmake_at_least_3.20 do cmake . -DPUTTY_EMBEDDED_CHM_FILE=$$(realpath ../../../docbuild/putty.chm)
in putty/windows/abuild64 with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
# Build the 'old' binaries, which should still run on all 32-bit
# versions of Windows back to Win95 (but not Win32s). These link
# against Visual Studio 2003 libraries (the more modern versions
# assume excessively modern Win32 API calls to be available), specify
# a subsystem version of 4.0, and compile with /arch:IA32 to prevent
# the use of modern CPU features like MMX which older machines also
# might not have.
#
# There's no installer to go with these, so they must also embed the
# help file.
in putty/windows/buildold with cmake_at_least_3.20 do cmake ../.. -DCMAKE_TOOLCHAIN_FILE=$(cmake_toolchain_clangcl32_2003) -DCMAKE_BUILD_TYPE=Release -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DPUTTY_LINK_MAPS=ON -DSTRICT=ON
in putty/windows/buildold with cmake_at_least_3.20 do make -j$(nproc) VERBOSE=1
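# (Illustrative only: the toolchain file named above comes from the
# local bob configuration, not from this script, but for a clang-cl
# cross build one would expect it to arrange compile and link options
# along the lines of
#   clang-cl --target=i686-pc-windows-msvc /arch:IA32 ...
#   lld-link /subsystem:windows,4.0 ...
# together with include and library paths for the old VS libraries.)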
# Regenerate to-sign.txt with the 'old' binaries included.
in putty/windows do for subdir in build32 abuild32 build64 abuild64 buildold; do sed "s!^!$$subdir/!" $$subdir/shipped.txt; done > to-sign.txt
# Code-sign the standalone versions of the binaries.
ifneq "$(cross_winsigncode)" "" in putty/windows do $(cross_winsigncode) -N -i https://www.chiark.greenend.org.uk/~sgtatham/putty/ $$(cat to-sign.txt)
# Move the shipped (and signed) binaries into another directory to
# deliver them from, so that we omit testcrypt and its ilk.
in putty/windows do mkdir deliver
in putty/windows do for subdir in build32 abuild32 build64 abuild64 buildold; do mkdir deliver/$$subdir; done
in putty/windows do while read x; do mv $$x deliver/$$x; mv $$x.map deliver/$$x.map; done < to-sign.txt
# Make putty.zip in each Windows directory. We add the files one by
# one, because 'zip' exits with a success status if it managed to add
# _at least one_ of the input files, even if another didn't exist. So
# doing them one at a time gets us proper error reporting.
in putty/windows/deliver/buildold do for file in *.exe ../../../../docbuild/putty.chm; do case "$$file" in puttytel.exe | pterm.exe) ;; *) zip -k -j putty.zip "$$file";; esac; done
in putty/windows/deliver/build32 do for file in *.exe ../../../../docbuild/putty.chm; do case "$$file" in puttytel.exe | pterm.exe) ;; *) zip -k -j putty.zip "$$file";; esac; done
in putty/windows/deliver/build64 do for file in *.exe ../../../../docbuild/putty.chm; do case "$$file" in puttytel.exe | pterm.exe) ;; *) zip -k -j putty.zip "$$file";; esac; done
in putty/windows/deliver/abuild32 do for file in *.exe ../../../../docbuild/putty.chm; do case "$$file" in puttytel.exe | pterm.exe) ;; *) zip -k -j putty.zip "$$file";; esac; done
in putty/windows/deliver/abuild64 do for file in *.exe ../../../../docbuild/putty.chm; do case "$$file" in puttytel.exe | pterm.exe) ;; *) zip -k -j putty.zip "$$file";; esac; done
in docbuild/html do for file in *.html; do zip puttydoc.zip "$$file"; done
# Deliver the actual PuTTY release directory into a subdir `putty'.
deliver putty/windows/deliver/buildold/*.exe putty/w32old/$@
deliver putty/windows/deliver/buildold/putty.zip putty/w32old/$@
deliver putty/windows/deliver/build32/*.exe putty/w32/$@
deliver putty/windows/deliver/build32/putty.zip putty/w32/$@
deliver putty/windows/deliver/build64/*.exe putty/w64/$@
deliver putty/windows/deliver/build64/putty.zip putty/w64/$@
deliver putty/windows/installer32.msi putty/w32/$(Ifilename32).msi
deliver putty/windows/installer64.msi putty/w64/$(Ifilename64).msi
deliver putty/windows/installera32.msi putty/wa32/$(Ifilenamea32).msi
deliver putty/windows/installera64.msi putty/wa64/$(Ifilenamea64).msi
deliver putty/windows/deliver/abuild32/*.exe putty/wa32/$@
deliver putty/windows/deliver/abuild32/putty.zip putty/wa32/$@
deliver putty/windows/deliver/abuild64/*.exe putty/wa64/$@
deliver putty/windows/deliver/abuild64/putty.zip putty/wa64/$@
deliver docbuild/html/puttydoc.zip putty/$@
deliver docbuild/putty.chm putty/$@
deliver docbuild/puttydoc.txt putty/$@
deliver docbuild/html/*.html putty/htmldoc/$@
deliver putty/putty-src.zip putty/$@
deliver putty/*.tar.gz putty/$@
# Deliver the map files alongside the `proper' release deliverables.
deliver putty/windows/deliver/buildold/*.map maps/w32old/$@
deliver putty/windows/deliver/build32/*.map maps/w32/$@
deliver putty/windows/deliver/build64/*.map maps/w64/$@
deliver putty/windows/deliver/abuild32/*.map maps/wa32/$@
deliver putty/windows/deliver/abuild64/*.map maps/wa64/$@
# Deliver sign.sh, so that whoever has just built PuTTY (the
# snapshot scripts or me, depending) can conveniently sign it with
# whatever key they want.
deliver putty/sign.sh $@
# Create files of cryptographic checksums, which will be signed along
# with the files they verify. We've provided MD5 checksums for a
# while, but now that MD5 is looking iffy, we're expanding our
# selection.
#
# Creating these files is most easily done in the destination
# directory, where all the files we're delivering are already in their
# final relative layout.
in . do pwd > builddir
read Builddir builddir
in-dest putty do a=`\find * -type f -print`; for hash in md5 sha1 sha256 sha512; do ($${hash}sum $$a; echo; cat $(Builddir)/putty/$${hash}sums.installer) > $${hash}sums; done
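# (Illustration only, not part of the build logic: each generated
# file, e.g. sha256sums, ends up in the usual coreutils format -- one
# "<hash>  <relative path>" line per delivered file, such as the
# hypothetical
#     e3b0c44298fc1c14...  w64/putty.exe
# -- followed by a blank line and then the contents of the
# corresponding checksums-of-installers file, e.g. sha256sums.installer,
# copied from the putty subdirectory of the build directory captured
# above.)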
# And construct .htaccess files. One in the top-level directory,
# setting the MIME type for Windows help files and providing an
# appropriate link to the source archive:
in-dest putty do echo "AddType application/octet-stream .chm" >> .htaccess
in-dest putty do set -- putty*.tar.gz; for k in '' .gpg; do echo RedirectMatch temp '(.*/)'putty.tar.gz$$k\$$ '$$1'"$$1$$k" >> .htaccess; done
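# (Illustration only -- the version number below is invented: if the
# source archive delivered above were putty-0.76.tar.gz, the two lines
# appended to .htaccess would read
#     RedirectMatch temp (.*/)putty.tar.gz$ $1putty-0.76.tar.gz
#     RedirectMatch temp (.*/)putty.tar.gz.gpg$ $1putty-0.76.tar.gz.gpg
# so the stable putty.tar.gz URL, and its .gpg signature, always
# redirect to the current versioned tarball.)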
# And one in each binary directory, providing links for the installers.
in-dest putty do for params in "w32 putty-installer" "w64 putty-64bit-installer" "wa32 putty-arm32-installer" "wa64 putty-arm64-installer"; do (set -- $$params; subdir=$$1; installername=$$2; cd $$subdir && for ext in msi exe; do set -- putty*installer.$$ext; if test -f $$1; then for k in '' .gpg; do echo RedirectMatch temp '(.*/)'$${installername}.$$ext$$k\$$ '$$1'"$$1$$k" >> .htaccess; done; fi; done); done
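# (Again purely illustrative, with an invented version number: in the
# w64 subdirectory, if the installer file were
# putty-64bit-0.76-installer.msi, the loop above would append
#     RedirectMatch temp (.*/)putty-64bit-installer.msi$ $1putty-64bit-0.76-installer.msi
# plus the matching .gpg line, giving each platform a stable installer
# URL that tracks the latest versioned filename.)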