diff --git a/.github/workflows/caddy.yml b/.github/workflows/caddy.yml deleted file mode 100644 index 38595d5b..00000000 --- a/.github/workflows/caddy.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: Build caddy with linkup modules - -on: - workflow_dispatch: - inputs: - tag_name: - description: 'Tag to use for the release (e.g., 1.0.0)' - required: true - push: - tags: - - '[0-9][0-9]*.[0-9][0-9]*.[0-9][0-9]*' - -jobs: - build-and-release: - name: Build and Release Caddy with Linkup Modules - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, macos-latest] - arch: [amd64, arm64] - steps: - # Set up Go environment - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: '1.23' - - - name: Install xcaddy - run: | - go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest - - # Build Caddy with custom module - - name: Build Caddy with Custom Module - run: | - if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then - TARGET_OS="linux" - else - TARGET_OS="darwin" - fi - xcaddy build \ - --output "caddy-${TARGET_OS}-${{ matrix.arch }}" \ - --with github.com/mentimeter/caddy-dns-linkup \ - --with github.com/mentimeter/caddy-storage-linkup - env: - GOBIN: $HOME/go/bin # Ensure Go binaries are in the PATH - - # Archive the binary - - name: Archive Caddy Binary - run: | - if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then - TARGET_OS="linux" - else - TARGET_OS="darwin" - fi - tar -czvf caddy-${TARGET_OS}-${{ matrix.arch }}.tar.gz caddy-${TARGET_OS}-${{ matrix.arch }} - shell: bash - - - name: Get Release Info - id: get_release - uses: actions/github-script@v7 - with: - script: | - let tagName; - if (context.eventName === 'workflow_dispatch') { - tagName = core.getInput('tag_name'); - console.log(`Tag name from workflow_dispatch: ${tagName}`); - } else if (context.eventName === 'push' && context.ref.startsWith('refs/tags/')) { - tagName = context.ref.replace('refs/tags/', ''); - console.log(`Tag name from push: ${tagName}`); - } else { - throw new Error('This workflow must be triggered by a push to a tag or a manual dispatch with a tag_name input.'); - } - if (!tagName) { - throw new Error('Tag name is empty.'); - } - const releases = await github.rest.repos.listReleases({ - owner: context.repo.owner, - repo: context.repo.repo - }); - const release = releases.data.find(r => r.tag_name === tagName); - if (!release) { - throw new Error(`Release with tag ${tagName} not found.`); - } - console.log(`Found release: ${release.name}`); - core.setOutput('upload_url', release.upload_url); - env: - INPUT_TAG_NAME: ${{ inputs.tag_name }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - # Upload binary to the release - - name: Upload Release Asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.get_release.outputs.upload_url }} - asset_path: ./caddy-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}.tar.gz - asset_name: caddy-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}.tar.gz - asset_content_type: application/gzip \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f1375db9..2a67eb69 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,10 @@ name: CI jobs: check: name: Check and Clippy - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] env: RUSTFLAGS: -D warnings steps: @@ -30,8 +33,11 @@ jobs: - run: cargo fmt --all 
--check test: - name: Test Suite - runs-on: ubuntu-latest + name: Test Suite (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable diff --git a/.github/workflows/release-next.yml b/.github/workflows/release-next.yml new file mode 100644 index 00000000..7a8f5078 --- /dev/null +++ b/.github/workflows/release-next.yml @@ -0,0 +1,133 @@ +name: Create Release of next branch + +on: + workflow_dispatch: + push: + branches: + - next + +jobs: + generate-version: + name: Generate Release Version + runs-on: ubuntu-latest + outputs: + version: ${{ steps.generate_version.outputs.version }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: "next" + + - name: Generate Next Release Version + id: generate_version + uses: actions/github-script@v7 + with: + script: | + const { execSync } = require('child_process'); + const fs = require('fs'); + const path = require('path'); + + const shortSha = execSync('git rev-parse --short HEAD').toString().trim(); + const now = new Date(); + const pad = (n) => n.toString().padStart(2, '0'); + const timestamp = `${now.getFullYear()}${pad(now.getMonth()+1)}${pad(now.getDate())}${pad(now.getHours())}${pad(now.getMinutes())}`; + const nextVersion = `0.0.0-next-${timestamp}-${shortSha}`; + + core.info(`Set release version to ${nextVersion}`); + + core.setOutput('version', nextVersion); + + build: + name: Build + runs-on: ${{ matrix.os }} + needs: [generate-version] + strategy: + matrix: + include: + - build: linux + os: depot-ubuntu-22.04-8 + target: x86_64-unknown-linux-gnu + - build: aarch64 + os: depot-ubuntu-22.04-arm-8 + target: aarch64-unknown-linux-gnu + linker: gcc-aarch64-linux-gnu + - build: macos + os: depot-macos-14 + target: x86_64-apple-darwin + - build: macos-aarch64 + os: depot-macos-14 + target: aarch64-apple-darwin + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: "next" + + - name: Update Cargo.toml Version + run: | + sed -i.bak 's@^version = .*@version = "${{ needs.generate-version.outputs.version }}"@' linkup-cli/Cargo.toml + rm linkup-cli/Cargo.toml.bak + + - name: Install Rust + run: | + rustup install stable + rustup target add ${{ matrix.target }} + rustup show + + - name: Build + run: cargo build --release --manifest-path linkup-cli/Cargo.toml --target ${{ matrix.target }} + + - name: Package and Calculate Checksums + id: package + uses: actions/github-script@v7 + env: + TARGET: ${{ matrix.target }} + RUNNER_OS: ${{ runner.os }} + RELEASE_VERSION: ${{ needs.generate-version.outputs.version }} + with: + script: | + const { execSync } = require('child_process'); + const fs = require('fs'); + const os = require('os'); + const path = require('path'); + + const releaseVersion = process.env.RELEASE_VERSION; + const target = process.env.TARGET; + const runnerOs = process.env.RUNNER_OS; + if (!releaseVersion) { + throw new Error("RELEASE_VERSION is not set"); + } + + const binaryPath = `target/${target}/release/linkup`; + + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'linkup-')); + + fs.copyFileSync(binaryPath, path.join(tmpDir, 'linkup')); + + const assetName = `linkup-${releaseVersion}-${target}.tar.gz`; + execSync(`tar czf ${assetName} -C ${tmpDir} linkup`); + + let checksum; + if (runnerOs === 'Linux') { + checksum = execSync(`sha256sum ${assetName}`).toString().split(' ')[0]; + } else { + checksum = execSync(`shasum -a 256 ${assetName}`).toString().split(' 
')[0]; + } + + const checksumFile = `${assetName}.sha256`; + fs.writeFileSync(checksumFile, `${checksum} ${assetName}`); + + core.setOutput('asset_name', assetName); + core.setOutput('checksum_file', checksumFile); + + - name: Release + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ needs.generate-version.outputs.version }} + target_commitish: next + prerelease: true + files: | + ${{ steps.package.outputs.asset_name }} + ${{ steps.package.outputs.checksum_file }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 2cd6d80b..de83c4af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,6 +97,51 @@ version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -107,11 +152,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", @@ -177,7 +233,7 @@ dependencies = [ "rustversion", "serde", "sync_wrapper", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -213,7 +269,7 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-tungstenite", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -259,6 +315,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "axum-server" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower 0.4.13", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -548,6 +628,21 @@ dependencies = [ "cfg-if", ] +[[package]] 
+name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -659,6 +754,20 @@ version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.11" @@ -933,6 +1042,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -952,10 +1074,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + [[package]] name = "gimli" version = "0.31.1" @@ -1027,7 +1161,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -1035,6 +1169,34 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-proto" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "critical-section", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.0", + "ring", + "serde", + "thiserror 2.0.11", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hickory-resolver" version = "0.24.2" @@ -1043,12 +1205,12 @@ checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.3", "ipconfig", "lru-cache", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror 1.0.69", @@ -1056,6 +1218,52 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.1", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.0", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.11", + "tokio", + "tracing", +] + +[[package]] +name = "hickory-server" +version = "0.25.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "716f516285473ce476dfc996bac9a3c9ef2fee4f380ebec5980b12216fe4f547" +dependencies = [ + "async-trait", + "bytes", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-util", + "hickory-proto 0.25.1", + "hickory-resolver 0.25.1", + "ipnet", + "prefix-trie", + "serde", + "thiserror 2.0.11", + "time", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "home" version = "0.5.11" @@ -1159,9 +1367,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "log", "rustls", - "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -1411,6 +1617,9 @@ name = "ipnet" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +dependencies = [ + "serde", +] [[package]] name = "is_terminal_polyfill" @@ -1503,7 +1712,7 @@ version = "0.1.0" dependencies = [ "hex", "http", - "rand", + "rand 0.8.5", "regex", "serde", "serde_json", @@ -1516,8 +1725,9 @@ dependencies = [ [[package]] name = "linkup-cli" -version = "2.2.0" +version = "3.0.0" dependencies = [ + "anyhow", "base64", "clap", "clap_complete", @@ -1528,13 +1738,13 @@ dependencies = [ "env_logger", "flate2", "hex", - "hickory-resolver", + "hickory-resolver 0.24.2", "linkup", "linkup-local-server", "log", "mockall", "mockito", - "rand", + "rand 0.8.5", "regex", "reqwest", "serde", @@ -1553,17 +1763,21 @@ name = "linkup-local-server" version = "0.1.0" dependencies = [ "axum 0.8.1", + "axum-server", "futures", + "hickory-server", "http", "hyper", "hyper-rustls", "hyper-util", "linkup", + "rcgen", "rustls", "rustls-native-certs", + "rustls-pemfile", "thiserror 2.0.11", "tokio", - "tower", + "tower 0.5.2", "tower-http", ] @@ -1594,7 +1808,7 @@ dependencies = [ "cloudflare", "console_error_panic_hook", "futures", - "getrandom", + "getrandom 0.2.15", "http", "linkup", "regex", @@ -1635,6 +1849,19 @@ version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -1650,6 +1877,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matchit" version = "0.7.3" @@ -1707,7 +1943,7 @@ checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -1753,7 +1989,7 @@ dependencies = [ "hyper", "hyper-util", "log", - "rand", + "rand 0.8.5", "regex", "serde_json", "serde_urlencoded", @@ -1761,6 +1997,25 @@ dependencies = [ "tokio", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + 
"crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "native-tls" version = "0.2.12" @@ -1809,12 +2064,41 @@ dependencies = [ "winapi", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1833,11 +2117,24 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "openssl" @@ -1883,6 +2180,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parking_lot" version = "0.12.3" @@ -1912,6 +2215,16 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -1956,6 +2269,12 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + [[package]] name = "powerfmt" version = "0.2.0" @@ -1968,7 +2287,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -1997,6 +2316,16 @@ dependencies = [ "termtree", ] +[[package]] +name = "prefix-trie" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eb5f930995ba4986bd239ba8d8fded67cad82d1db329c4f316f312847cba16aa" +dependencies = [ + "ipnet", + "num-traits", +] + [[package]] name = "prettyplease" version = "0.2.29" @@ -2056,8 +2385,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", - "rand", + "getrandom 0.2.15", + "rand 0.8.5", "ring", "rustc-hash 2.1.0", "rustls", @@ -2092,6 +2421,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -2099,8 +2434,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy 0.8.24", ] [[package]] @@ -2110,7 +2456,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -2119,7 +2475,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", ] [[package]] @@ -2142,6 +2507,20 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "x509-parser", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.5.8" @@ -2159,8 +2538,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -2171,9 +2559,15 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -2227,7 +2621,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tower", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -2255,7 +2649,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -2319,6 +2713,15 @@ dependencies = [ "semver", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.38.43" @@ -2339,7 +2742,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "aws-lc-rs", - "log", "once_cell", "ring", "rustls-pki-types", @@ -2411,6 +2813,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -2600,6 +3008,15 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -2733,7 +3150,7 @@ dependencies = [ "memchr", "ntapi", "rayon", - "windows", + "windows 0.57.0", ] [[package]] @@ -2757,6 +3174,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tar" version = "0.4.43" @@ -2776,7 +3199,7 @@ checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", - "getrandom", + "getrandom 0.2.15", "once_cell", "rustix", "windows-sys 0.59.0", @@ -2828,6 +3251,16 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "time" version = "0.3.37" @@ -2975,6 +3408,21 @@ dependencies = [ "winnow", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.5.2" @@ -3049,6 +3497,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + 
"once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", ] [[package]] @@ -3069,7 +3547,7 @@ dependencies = [ "http", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", "thiserror 2.0.11", "utf-8", @@ -3147,9 +3625,16 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ + "getrandom 0.2.15", "serde", ] +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3177,6 +3662,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -3340,6 +3834,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -3355,12 +3859,25 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement", - "windows-interface", + "windows-implement 0.57.0", + "windows-interface 0.57.0", "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -3372,6 +3889,17 @@ dependencies = [ "syn", ] +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-interface" version = "0.57.0" @@ -3383,6 +3911,17 @@ dependencies = [ "syn", ] +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-registry" version = "0.2.0" @@ -3589,6 +4128,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = 
"wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + [[package]] name = "worker" version = "0.5.0" @@ -3676,6 +4224,24 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "ring", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + [[package]] name = "xattr" version = "1.4.0" @@ -3687,6 +4253,15 @@ dependencies = [ "rustix", ] +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "yoke" version = "0.7.5" @@ -3718,7 +4293,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +dependencies = [ + "zerocopy-derive 0.8.24", ] [[package]] @@ -3732,6 +4316,17 @@ dependencies = [ "syn", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "zerofrom" version = "0.1.5" diff --git a/docs/src/content/docs/explanation/how-it-works.md b/docs/src/content/docs/explanation/how-it-works.md index e1df7fb1..af7540da 100644 --- a/docs/src/content/docs/explanation/how-it-works.md +++ b/docs/src/content/docs/explanation/how-it-works.md @@ -121,5 +121,3 @@ In order to be able to direct traffic to servers that might be running on `local In its default mode, Linkup has a fairly strong dependency on the network. For frontend engineers who are running development servers, they may have pages that require 50-100 mb of JavaScript to load. In order to speed up cases where the network might be a bottleneck, Linkup provides a local DNS mode that is optionally installable on developers' machines. Local DNS will resolve your application's domains directly to servers running on your local machine. This means that all requests that could have been handled directly by your local machine will not go over the public internet. Linkup also has the ability to manage certificates associated with these local domains to make the experience as seamless as possible. - -Currently, linkup local DNS uses [dnsmasq](https://www.dnsmasq.org/) to provide local DNS resolution. And [caddy](https://caddyserver.com/) to provide tls certificates. 
\ No newline at end of file diff --git a/docs/src/content/docs/guides/local-env.md b/docs/src/content/docs/guides/local-env.md index 2501e360..bb375c07 100644 --- a/docs/src/content/docs/guides/local-env.md +++ b/docs/src/content/docs/guides/local-env.md @@ -1,5 +1,5 @@ --- -title: Run a Local Linkup Session +title: Run a Local Linkup Session description: Get started with linkup by running a local linkup session sidebar: order: 1 @@ -11,6 +11,8 @@ sidebar: ## Installing the CLI +### With Homebrew + To use link up locally the easiest way to get started is to use the linkup cli: ```sh @@ -18,6 +20,16 @@ brew tap mentimeter/mentimeter brew install linkup ``` +### Using the `install.py` script + +```sh +curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/mentimeter/linkup/refs/heads/main/linkup-cli/install | python3 + +# Or to install a pre-release version (beta) + +curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/mentimeter/linkup/refs/heads/main/linkup-cli/install | python3 - --channel beta +``` + Once you have the cli installed you can start a linkup session by running: ```zsh diff --git a/linkup-cli/Cargo.toml b/linkup-cli/Cargo.toml index abd95073..69a93d81 100644 --- a/linkup-cli/Cargo.toml +++ b/linkup-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linkup-cli" -version = "2.2.0" +version = "3.0.0" edition = "2021" build = "build.rs" @@ -9,6 +9,7 @@ name = "linkup" path = "src/main.rs" [dependencies] +anyhow = "1" clap = { version = "4.5.27", features = ["derive", "cargo"] } clap_complete = "4.5.42" cloudflare = { path = "../cloudflare", default-features = false, features = [ @@ -47,3 +48,6 @@ flate2 = "1.0.35" [dev-dependencies] mockall = "0.13.1" mockito = "1.6.1" + +[features] +default = [] diff --git a/linkup-cli/install.py b/linkup-cli/install.py new file mode 100755 index 00000000..ae759f08 --- /dev/null +++ b/linkup-cli/install.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 + +import sys + +python_version = sys.version_info +if python_version.major < 3 or python_version.minor < 2: + print(f"Minimum required Python version is 3.2. 
Current one: {sys.version.split(' ')[0]}") + exit(1) + +import argparse +import json +import os +import re +import shutil +import tarfile +import urllib.request +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Any, Optional, Tuple, List + +LINKUP_BIN_PATH = Path.home() / ".linkup" / "bin" + +class Shell(Enum): + bash = "bash" + zsh = "zsh" + fish = "fish" + + + @staticmethod + def from_str(value: str) -> Optional["Shell"]: + value_lower = value.lower() + + if value_lower == "bash": + return Shell.bash + elif value_lower == "zsh": + return Shell.zsh + elif value_lower == "fish": + return Shell.fish + else: + return None + + + def add_to_profile_command(self) -> Optional[str]: + if self == Shell.bash: + return f"echo 'export PATH=$PATH:{LINKUP_BIN_PATH}' >> {Path.home()}/.bashrc" + elif self == Shell.zsh: + return f"echo 'export PATH=$PATH:{LINKUP_BIN_PATH}' >> {Path.home()}/.zshrc" + elif self == Shell.fish: + return f"echo 'set -gx PATH $PATH {LINKUP_BIN_PATH}' >> {Path.home()}/.config/fish/config.fish" + else: + return None + + +class OS(Enum): + MacOS = "apple-darwin" + Linux = "unknown-linux-gnu" + + +class Arch(Enum): + x86_64 = "x86_64" + arm64 = "aarch64" + + +class Channel(Enum): + stable = "stable" + beta = "beta" + + +@dataclass +class GithubReleaseAsset: + name: str + browser_download_url: str + + +@dataclass +class GithubRelease: + tag_name: str + prerelease: bool + assets: List[GithubReleaseAsset] + + @staticmethod + def from_json(obj: Any) -> "GithubRelease": + assets = [ + GithubReleaseAsset( + name=asset["name"], + browser_download_url=asset["browser_download_url"], + ) + for asset in obj["assets"] + ] + + return GithubRelease(tag_name=obj["tag_name"], prerelease=obj["prerelease"], assets=assets) + + +@dataclass +class Context: + channel: Channel + fetch_os: Optional[OS] = None + fetch_arch: Optional[Arch] = None + release_data: Optional[GithubRelease] = None + + +def command_exists(cmd: str) -> bool: + return shutil.which(cmd) is not None + + +def check_dependencies() -> None: + if not command_exists("cloudflared"): + print("WARN: 'cloudflared' is not installed. Please install it before installing Linkup.") + print("More info: https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/downloads/") + sys.exit(1) + + +def detect_platform() -> Tuple[OS, Arch]: + os_name = os.uname().sysname + arch = os.uname().machine + + if os_name.startswith("Darwin"): + fetch_os = OS.MacOS + elif os_name.startswith("Linux"): + fetch_os = OS.Linux + else: + print(f"Unsupported OS: {os_name}") + sys.exit(1) + + if arch in ("arm64", "aarch64"): + fetch_arch = Arch.arm64 + elif arch == "x86_64": + fetch_arch = Arch.x86_64 + else: + print(f"Unsupported Arch: {arch}") + sys.exit(1) + + return fetch_os, fetch_arch + + +def get_release_data(channel: Channel) -> GithubRelease: + if channel == Channel.beta: + print("Looking for the latest beta version...") + releases = list_releases() + + pre_releases = [r for r in releases if r.prerelease] + if not pre_releases: + print("No pre-releases found. 
Falling back to latest stable release.") + + return get_latest_stable_release() + else: + print(f"Found pre-release version: {pre_releases[0].tag_name}") + + return pre_releases[0] + else: + return get_latest_stable_release() + + +def list_releases() -> List[GithubRelease]: + req = urllib.request.Request( + "https://api.github.com/repos/mentimeter/linkup/releases", + headers={ + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28" + } + ) + + with urllib.request.urlopen(req) as response: + return [GithubRelease.from_json(release) for release in json.load(response)] + + +def get_latest_stable_release() -> GithubRelease: + req = urllib.request.Request( + "https://api.github.com/repos/mentimeter/linkup/releases/latest", + headers={ + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28" + } + ) + + with urllib.request.urlopen(req) as response: + return GithubRelease.from_json(json.load(response)) + + +def download_and_extract(user_os: OS, user_arch: Arch, channel: Channel, release: GithubRelease) -> None: + print(f"Latest release on {channel.name} channel: {release.tag_name}.") + print(f"Looking for asset for {user_os.value}/{user_arch.value}...") + asset_pattern = re.compile(rf"linkup-.+-{user_arch.value}-{user_os.value}\.tar\.gz$") + + download_url = next( + ( + asset.browser_download_url + for asset in release.assets if asset_pattern.match(asset.name) + ), + None, + ) + + if not download_url: + print("Could not find matching tarball in the release assets.") + sys.exit(1) + + print(f"Downloading: {download_url}") + local_tar_path = Path("/tmp") / Path(download_url).name + + with urllib.request.urlopen(download_url) as response, open(local_tar_path, "wb") as out_file: + shutil.copyfileobj(response, out_file) + + print(f"Decompressing {local_tar_path}") + with tarfile.open(local_tar_path, "r:gz") as tar: + tar.extractall(path="/tmp") + + LINKUP_BIN_PATH.mkdir(parents=True, exist_ok=True) + shutil.move("/tmp/linkup", LINKUP_BIN_PATH / "linkup") + os.chmod(LINKUP_BIN_PATH / "linkup", 0o755) + + print(f"Linkup installed at {LINKUP_BIN_PATH / 'linkup'}") + local_tar_path.unlink() + + +def setup_path() -> None: + if str(LINKUP_BIN_PATH) in os.environ.get("PATH", "").split(":"): + return + + print(f"\nTo start using Linkup, add '{LINKUP_BIN_PATH}' to your PATH.") + + shell = Shell.from_str(os.path.basename(os.environ.get("SHELL", ""))) + if shell is None: + return + + print(f"Since you are using {shell.name}, you can run the following to add to your profile:") + print(f"\n {shell.add_to_profile_command()}") + print("\nThen restart your shell."); + + +def parse_arguments(args: List[str]) -> Context: + parser = argparse.ArgumentParser(description="Install Linkup CLI") + + parser.add_argument( + "--channel", + choices=["stable", "beta"], + default="stable", + help="Release channel to use (default: stable)" + ) + + parsed = parser.parse_args(args) + channel = Channel[parsed.channel] + + return Context(channel=channel) + + +def main() -> None: + if command_exists("linkup"): + print("Linkup is already installed. To update it, run 'linkup update'.") + sys.exit(0) + + context = parse_arguments(sys.argv[1:]) + + check_dependencies() + + user_os, user_arch = detect_platform() + release = get_release_data(context.channel) + download_and_extract(user_os, user_arch, context.channel, release) + + setup_path() + + print("Linkup installation complete! 
🎉") + + +if __name__ == "__main__": + main() diff --git a/linkup-cli/install.sh b/linkup-cli/install.sh deleted file mode 100755 index 059e42ce..00000000 --- a/linkup-cli/install.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/sh - -if command -v -- "linkup" >/dev/null 2>&1; then - printf '%s\n' "Linkup is already installed. To update it, run 'linkup update'." 1>&2 - exit 0 -fi - -# region: Dependencies -# TODO: Maybe we want this script to be able to install the dependencies as well? -if ! command -v -- "cloudflared" >/dev/null 2>&1; then - printf '%s\n' "WARN: 'cloudflared' is not installed. Please install it before installing Linkup.\nFor more info check: https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/downloads/" 1>&2 - exit 1 -fi - -if ! command -v -- "dnsmasq" >/dev/null 2>&1; then - printf '%s\n' "WARN: 'dnsmasq' is not installed. Some features will not work as expected. Please install it.\nFor more info check: https://thekelleys.org.uk/dnsmasq/doc.html" 1>&2 -fi -# endregion: Dependencies - -OS=$(uname -s) -ARCH=$(uname -m) - -FETCH_OS='' -FETCH_ARCH='' -case "$OS" in -Darwin*) - FETCH_OS='apple-darwin' - case "$ARCH" in - arm64 | aarch64) - FETCH_ARCH='aarch64' - ;; - x86_64) - FETCH_ARCH='x86_64' - ;; - esac - ;; -Linux*) - FETCH_OS='unknown-linux-gnu' - case "$ARCH" in - arm64 | aarch64) - FETCH_ARCH='aarch64' - ;; - x86_64) - FETCH_ARCH='x86_64' - ;; - esac - ;; -esac - -if [ -z "$FETCH_OS" ] || [ -z "$FETCH_ARCH" ]; then - printf '%s\n' "Unsupported OS/Arch combination: $OS/$ARCH" 1>&2 - exit 1 -fi - -LOOKUP_FILE_DOWNLOAD_URL="https://github.com/mentimeter/linkup/releases/download/.*/linkup-.*-$FETCH_ARCH-$FETCH_OS.tar.gz" -FILE_DOWNLOAD_URL=$( - curl -sL \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/mentimeter/linkup/releases/latest | - grep -Eio "$LOOKUP_FILE_DOWNLOAD_URL" -) - -if [ -z "$FILE_DOWNLOAD_URL" ]; then - printf '%s\n' "Could not find file with pattern '$LOOKUP_FILE_DOWNLOAD_URL' on the latest GitHub release." 1>&2 - exit 1 -fi - -printf '%s\n' "Downloading: $FILE_DOWNLOAD_URL" 1>&2 -curl -sLO --output-dir "/tmp" $FILE_DOWNLOAD_URL - -LOCAL_FILE_PATH="/tmp/$(basename $FILE_DOWNLOAD_URL)" - -printf '%s\n' "Decompressing $LOCAL_FILE_PATH" 1>&2 -tar -xzf $LOCAL_FILE_PATH -C /tmp - -mkdir -p $HOME/.linkup/bin -mv /tmp/linkup $HOME/.linkup/bin/ -printf '%s\n' "Linkup installed on $HOME/.linkup/bin/linkup" 1>&2 - -rm "$LOCAL_FILE_PATH" - -case ":$PATH:" in -*":$HOME/.linkup/bin:"*) - # PATH already contains the directory - ;; -*) - SHELL_NAME=$(basename "$SHELL") - case "$SHELL_NAME" in - bash) - PROFILE_FILE="$HOME/.bashrc" - ;; - zsh) - PROFILE_FILE="$HOME/.zshrc" - ;; - fish) - PROFILE_FILE="$HOME/.config/fish/config.fish" - ;; - *) - PROFILE_FILE="$HOME/.profile" - ;; - esac - - printf '%s\n' "Adding Linkup bin to PATH in $PROFILE_FILE" 1>&2 - printf "\n# Linkup bin\nexport PATH=\$PATH:\$HOME/.linkup/bin" >>"$PROFILE_FILE" - printf '%s\n' "Please source your profile file or restart your terminal to apply the changes." 
1>&2 - ;; -esac diff --git a/linkup-cli/src/commands/completion.rs b/linkup-cli/src/commands/completion.rs index 8e798cdd..7db1fe39 100644 --- a/linkup-cli/src/commands/completion.rs +++ b/linkup-cli/src/commands/completion.rs @@ -3,7 +3,7 @@ use std::io::stdout; use clap::{Command, CommandFactory}; use clap_complete::{generate, Generator, Shell}; -use crate::{Cli, CliError}; +use crate::{Cli, Result}; #[derive(clap::Args)] pub struct Args { @@ -11,7 +11,7 @@ pub struct Args { shell: Option, } -pub fn completion(args: &Args) -> Result<(), CliError> { +pub fn completion(args: &Args) -> Result<()> { if let Some(shell) = &args.shell { let mut cmd = Cli::command(); print_completions(shell, &mut cmd); diff --git a/linkup-cli/src/commands/deploy/cf_deploy.rs b/linkup-cli/src/commands/deploy/cf_deploy.rs index 9077e32a..821920ce 100644 --- a/linkup-cli/src/commands/deploy/cf_deploy.rs +++ b/linkup-cli/src/commands/deploy/cf_deploy.rs @@ -1,5 +1,6 @@ use crate::commands::deploy::auth; use crate::commands::deploy::resources::cf_resources; +use crate::Result; use super::api::{AccountCloudflareApi, CloudflareApi}; use super::console_notify::ConsoleNotifier; @@ -7,8 +8,6 @@ use super::resources::TargetCfResources; #[derive(thiserror::Error, Debug)] pub enum DeployError { - #[error("No authentication method found, please set CLOUDFLARE_API_KEY and CLOUDFLARE_EMAIL or CLOUDFLARE_API_TOKEN")] - NoAuthenticationError, #[error("Cloudflare API error: {0}")] CloudflareApiError(#[from] reqwest::Error), #[error("Cloudflare Client error: {0}")] @@ -45,7 +44,7 @@ pub struct DeployArgs { zone_ids: Vec, } -pub async fn deploy(args: &DeployArgs) -> Result<(), DeployError> { +pub async fn deploy(args: &DeployArgs) -> Result<()> { println!("Deploying to Cloudflare..."); println!("Account ID: {}", args.account_id); println!("Zone IDs: {:?}", args.zone_ids); @@ -93,7 +92,7 @@ pub async fn deploy_to_cloudflare( api: &impl CloudflareApi, cloudflare_client: &cloudflare::framework::async_api::Client, notifier: &impl DeployNotifier, -) -> Result<(), DeployError> { +) -> Result<()> { // 1) Check what needs to change let plan = resources.check_deploy_plan(api, cloudflare_client).await?; diff --git a/linkup-cli/src/commands/deploy/cf_destroy.rs b/linkup-cli/src/commands/deploy/cf_destroy.rs index df0f8d34..b0fb9d1d 100644 --- a/linkup-cli/src/commands/deploy/cf_destroy.rs +++ b/linkup-cli/src/commands/deploy/cf_destroy.rs @@ -1,10 +1,9 @@ use crate::commands::deploy::{ api::AccountCloudflareApi, auth, console_notify::ConsoleNotifier, resources::cf_resources, }; +use crate::Result; -use super::{ - api::CloudflareApi, cf_deploy::DeployNotifier, resources::TargetCfResources, DeployError, -}; +use super::{api::CloudflareApi, cf_deploy::DeployNotifier, resources::TargetCfResources}; #[derive(clap::Args)] pub struct DestroyArgs { @@ -27,7 +26,7 @@ pub struct DestroyArgs { zone_ids: Vec, } -pub async fn destroy(args: &DestroyArgs) -> Result<(), DeployError> { +pub async fn destroy(args: &DestroyArgs) -> Result<()> { println!("Destroying from Cloudflare..."); println!("Account ID: {}", args.account_id); println!("Zone IDs: {:?}", args.zone_ids); @@ -76,7 +75,7 @@ pub async fn destroy_from_cloudflare( api: &impl CloudflareApi, cloudflare_client: &cloudflare::framework::async_api::Client, notifier: &impl DeployNotifier, -) -> Result<(), DeployError> { +) -> Result<()> { // 1) Check which resources actually exist and need removal let plan = resources.check_destroy_plan(api).await?; diff --git a/linkup-cli/src/commands/health.rs 
b/linkup-cli/src/commands/health.rs index ed04d2b8..c2058a6a 100644 --- a/linkup-cli/src/commands/health.rs +++ b/linkup-cli/src/commands/health.rs @@ -6,10 +6,17 @@ use std::{ use clap::crate_version; use colored::Colorize; +use regex::Regex; use serde::Serialize; -use crate::{linkup_dir_path, local_config::LocalState, services, CliError}; +use crate::{ + linkup_dir_path, + local_config::LocalState, + services::{self, find_service_pid, BackgroundService}, + Result, +}; +#[cfg(target_os = "macos")] use super::local_dns; #[derive(clap::Args)] @@ -19,7 +26,7 @@ pub struct Args { json: bool, } -pub fn health(args: &Args) -> Result<(), CliError> { +pub fn health(args: &Args) -> Result<()> { let health = Health::load()?; let health = if args.json { @@ -57,17 +64,16 @@ struct Session { } impl Session { - fn load() -> Result { - let state = LocalState::load()?; - - Ok(Self { - name: state.linkup.session_name, + fn load(state: &LocalState) -> Self { + Self { + name: state.linkup.session_name.clone(), tunnel_url: state .linkup .tunnel + .clone() .map(|url| url.as_str().to_string()) .unwrap_or("None".to_string()), - }) + } } } @@ -80,9 +86,9 @@ struct OrphanProcess { #[derive(Debug, Serialize)] struct BackgroudServices { linkup_server: BackgroundServiceHealth, - caddy: BackgroundServiceHealth, - dnsmasq: BackgroundServiceHealth, cloudflared: BackgroundServiceHealth, + #[cfg(target_os = "macos")] + dns_server: BackgroundServiceHealth, possible_orphan_processes: Vec, } @@ -94,10 +100,11 @@ enum BackgroundServiceHealth { } impl BackgroudServices { - fn load() -> Self { + #[cfg_attr(not(target_os = "macos"), allow(unused_variables))] + fn load(state: &LocalState) -> Self { let mut managed_pids: Vec = Vec::with_capacity(4); - let linkup_server = match services::LocalServer::new().running_pid() { + let linkup_server = match find_service_pid(services::LocalServer::ID) { Some(pid) => { managed_pids.push(pid); @@ -106,8 +113,8 @@ impl BackgroudServices { None => BackgroundServiceHealth::Stopped, }; - let dnsmasq = if services::is_dnsmasq_installed() { - match services::Dnsmasq::new().running_pid() { + let cloudflared = if services::is_cloudflared_installed() { + match find_service_pid(services::CloudflareTunnel::ID) { Some(pid) => { managed_pids.push(pid); @@ -119,71 +126,85 @@ impl BackgroudServices { BackgroundServiceHealth::NotInstalled }; - let caddy = if services::is_caddy_installed() { - match services::Caddy::new().running_pid() { - Some(pid) => { - managed_pids.push(pid); + #[cfg(target_os = "macos")] + let dns_server = match find_service_pid(services::LocalDnsServer::ID) { + Some(pid) => { + managed_pids.push(pid); - BackgroundServiceHealth::Running(pid.as_u32()) - } - None => BackgroundServiceHealth::Stopped, + BackgroundServiceHealth::Running(pid.as_u32()) } - } else { - BackgroundServiceHealth::NotInstalled - }; - - let cloudflared = if services::is_cloudflared_installed() { - match services::CloudflareTunnel::new().running_pid() { - Some(pid) => { - managed_pids.push(pid); - - BackgroundServiceHealth::Running(pid.as_u32()) + None => { + if local_dns::is_installed(&crate::local_config::managed_domains( + Some(state), + &None, + )) { + BackgroundServiceHealth::Stopped + } else { + BackgroundServiceHealth::NotInstalled } - None => BackgroundServiceHealth::Stopped, } - } else { - BackgroundServiceHealth::NotInstalled }; Self { linkup_server, - caddy, - dnsmasq, cloudflared, + #[cfg(target_os = "macos")] + dns_server, possible_orphan_processes: find_potential_orphan_processes(managed_pids), } } } fn 
find_potential_orphan_processes(managed_pids: Vec) -> Vec { - let current_pid = services::get_current_process_pid(); + let env_var_format = Regex::new(r"^[A-Z_][A-Z0-9_]*=.*$").unwrap(); + + let current_pid = sysinfo::get_current_pid().unwrap(); let mut orphans = Vec::new(); for (pid, process) in services::system().processes() { - if process - .cmd() - .iter() - .any(|item| item.to_string_lossy().contains("linkup")) - && pid != ¤t_pid - && !managed_pids.contains(pid) - { - let process_cmd = process - .cmd() - .iter() - .map(|s| s.to_string_lossy()) - .collect::>() - .join(" "); - - orphans.push(OrphanProcess { - cmd: process_cmd, - pid: pid.as_u32(), - }); + if pid == ¤t_pid || managed_pids.contains(pid) { + continue; + } + + let command = process.cmd(); + for part in command.iter() { + let mut part_string = part.to_string_lossy(); + + if env_var_format.is_match(&part_string) { + part_string = part_string + .replace(&linkup_dir_path().to_string_lossy().to_string(), "") + .into(); + } + + if part_string.contains("linkup") { + let full_command = command + .iter() + .map(|part| part.to_string_lossy()) + .collect::>() + .join(" "); + + orphans.push(OrphanProcess { + cmd: truncate_with_ellipsis(&full_command, 120), + pid: pid.as_u32(), + }); + } } } orphans } +fn truncate_with_ellipsis(value: &str, max_len: usize) -> String { + if value.len() > max_len { + let mut truncated = value.chars().take(max_len - 3).collect::(); + + truncated.push_str("..."); + truncated + } else { + value.to_string() + } +} + #[derive(Debug, Serialize)] struct Linkup { version: String, @@ -193,7 +214,7 @@ struct Linkup { } impl Linkup { - fn load() -> Result { + fn load() -> Result { let dir_path = linkup_dir_path(); let files: Vec = fs::read_dir(&dir_path)? .map(|f| f.unwrap().file_name().into_string().unwrap()) @@ -208,14 +229,21 @@ impl Linkup { } } +#[cfg(target_os = "macos")] #[derive(Debug, Serialize)] struct LocalDNS { + is_installed: bool, resolvers: Vec, } +#[cfg(target_os = "macos")] impl LocalDNS { - fn load() -> Result { + fn load(state: &LocalState) -> Result { Ok(Self { + is_installed: local_dns::is_installed(&crate::local_config::managed_domains( + Some(state), + &None, + )), resolvers: local_dns::list_resolvers()?, }) } @@ -224,29 +252,25 @@ impl LocalDNS { #[derive(Debug, Serialize)] struct Health { system: System, - session: Option, + session: Session, background_services: BackgroudServices, linkup: Linkup, + #[cfg(target_os = "macos")] local_dns: LocalDNS, } impl Health { - pub fn load() -> Result { - let session = match Session::load() { - Ok(session) => Some(session), - Err(CliError::NoState(_)) => None, - Err(error) => { - log::error!("Failed to load Session: {}", error); - None - } - }; + pub fn load() -> Result { + let state = LocalState::load()?; + let session = Session::load(&state); Ok(Self { system: System::load(), session, - background_services: BackgroudServices::load(), + background_services: BackgroudServices::load(&state), linkup: Linkup::load()?, - local_dns: LocalDNS::load()?, + #[cfg(target_os = "macos")] + local_dns: LocalDNS::load(&state)?, }) } } @@ -262,20 +286,8 @@ impl Display for Health { writeln!(f, " Architecture: {}", self.system.arch)?; writeln!(f, "{}", "Session info:".bold().italic())?; - writeln!( - f, - " Name: {}", - self.session - .as_ref() - .map_or("NONE".yellow(), |session| session.name.normal()) - )?; - writeln!( - f, - " Tunnel URL: {}", - self.session - .as_ref() - .map_or("NONE".yellow(), |session| session.tunnel_url.normal()) - )?; + writeln!(f, " Name: {}", 
self.session.name.normal())?; + writeln!(f, " Tunnel URL: {}", self.session.tunnel_url.normal())?; writeln!(f, "{}", "Background sevices:".bold().italic())?; write!(f, " - Linkup Server ")?; @@ -284,18 +296,21 @@ impl Display for Health { BackgroundServiceHealth::Stopped => writeln!(f, "{}", "NOT RUNNING".yellow())?, BackgroundServiceHealth::Running(pid) => writeln!(f, "{} ({})", "RUNNING".blue(), pid)?, } - write!(f, " - Caddy ")?; - match &self.background_services.caddy { - BackgroundServiceHealth::NotInstalled => writeln!(f, "{}", "NOT INSTALLED".yellow())?, - BackgroundServiceHealth::Stopped => writeln!(f, "{}", "NOT RUNNING".yellow())?, - BackgroundServiceHealth::Running(pid) => writeln!(f, "{} ({})", "RUNNING".blue(), pid)?, - } - write!(f, " - dnsmasq ")?; - match &self.background_services.dnsmasq { - BackgroundServiceHealth::NotInstalled => writeln!(f, "{}", "NOT INSTALLED".yellow())?, - BackgroundServiceHealth::Stopped => writeln!(f, "{}", "NOT RUNNING".yellow())?, - BackgroundServiceHealth::Running(pid) => writeln!(f, "{} ({})", "RUNNING".blue(), pid)?, + + #[cfg(target_os = "macos")] + { + write!(f, " - DNS Server ")?; + match &self.background_services.dns_server { + BackgroundServiceHealth::NotInstalled => { + writeln!(f, "{}", "NOT INSTALLED".yellow())? + } + BackgroundServiceHealth::Stopped => writeln!(f, "{}", "NOT RUNNING".yellow())?, + BackgroundServiceHealth::Running(pid) => { + writeln!(f, "{} ({})", "RUNNING".blue(), pid)? + } + } } + write!(f, " - Cloudflared ")?; match &self.background_services.cloudflared { BackgroundServiceHealth::NotInstalled => writeln!(f, "{}", "NOT INSTALLED".yellow())?, @@ -321,13 +336,23 @@ impl Display for Health { } } - write!(f, "{}", "Local DNS resolvers:".bold().italic())?; - if self.local_dns.resolvers.is_empty() { - writeln!(f, " {}", "EMPTY".yellow())?; - } else { - writeln!(f)?; - for file in &self.local_dns.resolvers { - writeln!(f, " - {}", file)?; + #[cfg(target_os = "macos")] + { + writeln!(f, "{}", "Local DNS:".bold().italic())?; + write!(f, " Installed: ",)?; + if self.local_dns.is_installed { + writeln!(f, "{}", "YES".green())?; + } else { + writeln!(f, "{}", "NO".yellow())?; + } + write!(f, " Resolvers:")?; + if self.local_dns.resolvers.is_empty() { + writeln!(f, " {}", "EMPTY".yellow())?; + } else { + writeln!(f)?; + for file in &self.local_dns.resolvers { + writeln!(f, " - {}", file)?; + } } } diff --git a/linkup-cli/src/commands/local.rs b/linkup-cli/src/commands/local.rs index 4515cf4b..0f395ad6 100644 --- a/linkup-cli/src/commands/local.rs +++ b/linkup-cli/src/commands/local.rs @@ -1,6 +1,8 @@ +use anyhow::anyhow; + use crate::{ local_config::{upload_state, LocalState, ServiceTarget}, - CliError, + Result, }; #[derive(clap::Args)] @@ -16,11 +18,9 @@ pub struct Args { all: bool, } -pub async fn local(args: &Args) -> Result<(), CliError> { +pub async fn local(args: &Args) -> Result<()> { if args.service_names.is_empty() && !args.all { - return Err(CliError::NoSuchService( - "No service names provided".to_string(), - )); + return Err(anyhow!("No service names provided")); } let mut state = LocalState::load()?; @@ -35,7 +35,8 @@ pub async fn local(args: &Args) -> Result<(), CliError> { .services .iter_mut() .find(|s| s.name.as_str() == service_name) - .ok_or_else(|| CliError::NoSuchService(service_name.to_string()))?; + .ok_or_else(|| anyhow!("Service with name '{}' does not exist", service_name))?; + service.current = ServiceTarget::Local; } } diff --git a/linkup-cli/src/commands/local_dns.rs 
b/linkup-cli/src/commands/local_dns.rs index 0b5c6f13..bf549dca 100644 --- a/linkup-cli/src/commands/local_dns.rs +++ b/linkup-cli/src/commands/local_dns.rs @@ -3,12 +3,15 @@ use std::{ process::{Command, Stdio}, }; -use clap::Subcommand; - use crate::{ - commands, is_sudo, - local_config::{config_path, get_config}, - services, sudo_su, CliError, Result, + commands, is_sudo, linkup_certs_dir_path, + local_config::{self, managed_domains, top_level_domains, LocalState}, + sudo_su, Result, +}; +use anyhow::{anyhow, Context}; +use clap::Subcommand; +use linkup_local_server::certificates::{ + setup_self_signed_certificates, uninstall_self_signed_certificates, }; #[derive(clap::Args)] @@ -31,49 +34,57 @@ pub async fn local_dns(args: &Args, config: &Option) -> Result<()> { } pub async fn install(config_arg: &Option) -> Result<()> { - let config_path = config_path(config_arg)?; - let input_config = get_config(&config_path)?; + // NOTE(augustoccesar)[2025-03-24] We decided to print this anyways, even if the current session already have sudo. + // This should help with visibility of what is happening. + println!("Linkup needs sudo access to:"); + println!(" - Ensure there is a folder /etc/resolvers"); + println!(" - Create file(s) for /etc/resolver/"); + println!(" - Add Linkup CA certificate to keychain"); + println!(" - Flush DNS cache"); if !is_sudo() { - println!("Linkup needs sudo access to:"); - println!(" - Ensure there is a folder /etc/resolvers"); - println!(" - Create file(s) for /etc/resolver/"); - println!(" - Flush DNS cache"); - sudo_su()?; } commands::stop(&commands::StopArgs {}, false)?; ensure_resolver_dir()?; - install_resolvers(&input_config.top_level_domains())?; - println!("Installing Caddy..."); + let domains = managed_domains(LocalState::load().ok().as_ref(), config_arg); - services::Caddy::install() - .await - .map_err(|e| CliError::LocalDNSInstall(e.to_string()))?; + install_resolvers(&top_level_domains(&domains))?; + + setup_self_signed_certificates(&linkup_certs_dir_path(), &domains) + .context("Failed to setup self-signed certificates")?; + + println!("Local DNS installed!"); Ok(()) } pub async fn uninstall(config_arg: &Option) -> Result<()> { - let config_path = config_path(config_arg)?; - let input_config = get_config(&config_path)?; + // NOTE(augustoccesar)[2025-03-24] We decided to print this anyways, even if the current session already have sudo. + // This should help with visibility of what is happening. 
+ println!("Linkup needs sudo access to:"); + println!(" - Delete file(s) on /etc/resolver"); + println!(" - Remove Linkup CA certificate from keychain"); + println!(" - Flush DNS cache"); if !is_sudo() { - println!("Linkup needs sudo access to:"); - println!(" - Delete file(s) on /etc/resolver"); - println!(" - Flush DNS cache"); + sudo_su()?; } commands::stop(&commands::StopArgs {}, false)?; - uninstall_resolvers(&input_config.top_level_domains())?; + let managed_top_level_domains = local_config::top_level_domains( + &local_config::managed_domains(LocalState::load().ok().as_ref(), config_arg), + ); - services::Caddy::uninstall() - .await - .map_err(|e| CliError::LocalDNSUninstall(e.to_string()))?; + uninstall_resolvers(&managed_top_level_domains)?; + uninstall_self_signed_certificates(&linkup_certs_dir_path()) + .context("Failed to uninstall self-signed certificates")?; + + println!("Local DNS uninstalled!"); Ok(()) } @@ -84,45 +95,45 @@ fn ensure_resolver_dir() -> Result<()> { .stdout(Stdio::null()) .stderr(Stdio::null()) .status() - .map_err(|err| { - CliError::LocalDNSInstall(format!( - "failed to create /etc/resolver folder. Reason: {}", - err - )) - })?; + .context("Failed to create /etc/resolver folder")?; Ok(()) } +pub fn is_installed(managed_domains: &[String]) -> bool { + match list_resolvers() { + Ok(resolvers) => managed_domains + .iter() + .any(|domain| resolvers.contains(domain)), + Err(error) => { + log::error!("Failed to load resolvers: {}", error); + + false + } + } +} + fn install_resolvers(resolve_domains: &[String]) -> Result<()> { for domain in resolve_domains.iter() { - let cmd_str = format!( - "echo \"nameserver 127.0.0.1\nport 8053\" > /etc/resolver/{}", - domain - ); + let cmd_str = format!("echo \"nameserver 127.0.0.1\nport 8053\" > /etc/resolver/{domain}"); + let status = Command::new("sudo") .arg("bash") .arg("-c") .arg(&cmd_str) .status() - .map_err(|err| { - CliError::LocalDNSInstall(format!( - "Failed to install resolver for domain {} to /etc/resolver/{}. Reason: {}", - domain, domain, err - )) + .with_context(|| { + format!("Failed to install resolver for domain {domain} to /etc/resolver/{domain}") })?; if !status.success() { - return Err(CliError::LocalDNSInstall(format!( - "Failed to install resolver for domain {} to /etc/resolver/{}", - domain, domain - ))); + return Err(anyhow!( + "Failed to install resolver for domain {domain} to /etc/resolver/{domain}" + )); } } flush_dns_cache()?; - - #[cfg(target_os = "macos")] kill_dns_responder()?; Ok(()) @@ -136,17 +147,10 @@ fn uninstall_resolvers(resolve_domains: &[String]) -> Result<()> { .stdout(Stdio::null()) .stderr(Stdio::null()) .status() - .map_err(|err| { - CliError::LocalDNSUninstall(format!( - "Failed to delete /etc/resolver/{}. 
Reason: {}", - domain, err - )) - })?; + .with_context(|| format!("Failed to delete /etc/resolver/{domain}",))?; } flush_dns_cache()?; - - #[cfg(target_os = "macos")] kill_dns_responder()?; Ok(()) @@ -170,42 +174,26 @@ pub fn list_resolvers() -> std::result::Result, std::io::Error> { } fn flush_dns_cache() -> Result<()> { - #[cfg(target_os = "linux")] - let status_flush = Command::new("resolvectl") - .args(["flush-caches"]) - .status() - .map_err(|_err| { - CliError::LocalDNSInstall("Failed to run resolvectl flush-caches".into()) - })?; - - #[cfg(target_os = "macos")] let status_flush = Command::new("dscacheutil") .args(["-flushcache"]) .status() - .map_err(|_err| { - CliError::LocalDNSInstall("Failed to run dscacheutil -flushcache".into()) - })?; + .context("Failed to flush DNS cache")?; if !status_flush.success() { - return Err(CliError::LocalDNSInstall("Failed flush DNS cache".into())); + return Err(anyhow!("Flushing DNS cache was unsuccessful")); } Ok(()) } -#[cfg(target_os = "macos")] fn kill_dns_responder() -> Result<()> { let status_kill_responder = Command::new("sudo") .args(["killall", "-HUP", "mDNSResponder"]) .status() - .map_err(|_err| { - CliError::LocalDNSInstall("Failed to run killall -HUP mDNSResponder".into()) - })?; + .context("Failed to kill DNS responder")?; if !status_kill_responder.success() { - return Err(CliError::LocalDNSInstall( - "Failed to run killall -HUP mDNSResponder".into(), - )); + return Err(anyhow!("Killing DNS responder was unsuccessful")); } Ok(()) diff --git a/linkup-cli/src/commands/mod.rs b/linkup-cli/src/commands/mod.rs index 808ab18f..09de8067 100644 --- a/linkup-cli/src/commands/mod.rs +++ b/linkup-cli/src/commands/mod.rs @@ -2,6 +2,7 @@ pub mod completion; pub mod deploy; pub mod health; pub mod local; +#[cfg(target_os = "macos")] pub mod local_dns; pub mod preview; pub mod remote; @@ -18,6 +19,7 @@ pub use {deploy::deploy, deploy::DeployArgs}; pub use {deploy::destroy, deploy::DestroyArgs}; pub use {health::health, health::Args as HealthArgs}; pub use {local::local, local::Args as LocalArgs}; +#[cfg(target_os = "macos")] pub use {local_dns::local_dns, local_dns::Args as LocalDnsArgs}; pub use {preview::preview, preview::Args as PreviewArgs}; pub use {remote::remote, remote::Args as RemoteArgs}; diff --git a/linkup-cli/src/commands/preview.rs b/linkup-cli/src/commands/preview.rs index bdce9365..b26a9d83 100644 --- a/linkup-cli/src/commands/preview.rs +++ b/linkup-cli/src/commands/preview.rs @@ -1,7 +1,8 @@ use crate::commands::status::{format_state_domains, SessionStatus}; use crate::local_config::{config_path, get_config}; use crate::worker_client::WorkerClient; -use crate::CliError; +use crate::Result; +use anyhow::Context; use clap::builder::ValueParser; use linkup::CreatePreviewRequest; @@ -19,29 +20,32 @@ pub struct Args { print_request: bool, } -pub async fn preview(args: &Args, config: &Option) -> Result<(), CliError> { +pub async fn preview(args: &Args, config: &Option) -> Result<()> { let config_path = config_path(config)?; let input_config = get_config(&config_path)?; let create_preview_request: CreatePreviewRequest = input_config.create_preview_request(&args.services); let url = input_config.linkup.worker_url.clone(); - let create_req_json = serde_json::to_string(&create_preview_request) - .map_err(|e| CliError::LoadConfig(url.to_string(), e.to_string()))?; if args.print_request { + let create_req_json = serde_json::to_string(&create_preview_request) + .context("Failed to encode request to JSON string")?; + println!("{}", 
create_req_json); + return Ok(()); } let preview_name = WorkerClient::from(&input_config) .preview(&create_preview_request) .await - .map_err(|e| CliError::LoadConfig(url.to_string(), e.to_string()))?; + .with_context(|| format!("Failed to send preview request to {}", url))?; let status = SessionStatus { name: preview_name.clone(), domains: format_state_domains(&preview_name, &input_config.domains), }; + status.print(); Ok(()) diff --git a/linkup-cli/src/commands/remote.rs b/linkup-cli/src/commands/remote.rs index f7de87b6..36503954 100644 --- a/linkup-cli/src/commands/remote.rs +++ b/linkup-cli/src/commands/remote.rs @@ -1,8 +1,10 @@ use crate::{ local_config::{upload_state, LocalState, ServiceTarget}, - CliError, + Result, }; +use anyhow::anyhow; + #[derive(clap::Args)] pub struct Args { service_names: Vec, @@ -16,11 +18,9 @@ pub struct Args { all: bool, } -pub async fn remote(args: &Args) -> Result<(), CliError> { +pub async fn remote(args: &Args) -> Result<()> { if args.service_names.is_empty() && !args.all { - return Err(CliError::NoSuchService( - "No service names provided".to_string(), - )); + return Err(anyhow!("No service names provided")); } let mut state = LocalState::load()?; @@ -35,7 +35,8 @@ pub async fn remote(args: &Args) -> Result<(), CliError> { .services .iter_mut() .find(|s| s.name.as_str() == service_name) - .ok_or_else(|| CliError::NoSuchService(service_name.to_string()))?; + .ok_or_else(|| anyhow!("Service with name '{}' does not exist", service_name))?; + service.current = ServiceTarget::Remote; } } diff --git a/linkup-cli/src/commands/reset.rs b/linkup-cli/src/commands/reset.rs index 02511c4f..b700d9cf 100644 --- a/linkup-cli/src/commands/reset.rs +++ b/linkup-cli/src/commands/reset.rs @@ -1,9 +1,9 @@ -use crate::{commands, local_config::LocalState, CliError}; +use crate::{commands, local_config::LocalState, Result}; #[derive(clap::Args)] pub struct Args {} -pub async fn reset(_args: &Args) -> Result<(), CliError> { +pub async fn reset(_args: &Args) -> Result<()> { let _ = LocalState::load()?; commands::stop(&commands::StopArgs {}, false)?; diff --git a/linkup-cli/src/commands/server.rs b/linkup-cli/src/commands/server.rs index e634ee57..260eadc5 100644 --- a/linkup-cli/src/commands/server.rs +++ b/linkup-cli/src/commands/server.rs @@ -1,22 +1,83 @@ -use std::fs; - -use crate::CliError; +use crate::Result; +use linkup::MemoryStringStore; +use tokio::select; #[derive(clap::Args)] pub struct Args { - #[arg(long)] - pidfile: String, + #[command(subcommand)] + server_kind: ServerKind, } -pub async fn server(args: &Args) -> Result<(), CliError> { - let pid = std::process::id(); - fs::write(&args.pidfile, pid.to_string())?; +#[derive(clap::Subcommand)] +pub enum ServerKind { + LocalWorker { + #[arg(long)] + certs_dir: String, + }, + + Dns { + #[arg(long)] + session_name: String, + #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')] + domains: Vec, + }, +} + +pub async fn server(args: &Args) -> Result<()> { + match &args.server_kind { + #[cfg_attr(not(target_os = "macos"), allow(unused_variables))] + ServerKind::LocalWorker { certs_dir } => { + let config_store = MemoryStringStore::default(); + + let http_config_store = config_store.clone(); + let handler_http = tokio::spawn(async move { + linkup_local_server::start_server_http(http_config_store) + .await + .unwrap(); + }); + + #[cfg(target_os = "macos")] + let handler_https = { + use std::path::PathBuf; + + let https_config_store = config_store.clone(); + let https_certs_dir = PathBuf::from(certs_dir); + + 
Some(tokio::spawn(async move { + linkup_local_server::start_server_https(https_config_store, &https_certs_dir) + .await; + })) + }; + + #[cfg(not(target_os = "macos"))] + let handler_https: Option> = None; + + match handler_https { + Some(handler_https) => { + select! { + _ = handler_http => (), + _ = handler_https => (), + } + } + None => { + handler_http.await.unwrap(); + } + } + } + ServerKind::Dns { + session_name, + domains, + } => { + let session_name = session_name.clone(); + let domains = domains.clone(); - let res = linkup_local_server::start_server().await; + let handler_dns = tokio::spawn(async move { + linkup_local_server::start_dns_server(session_name, domains).await; + }); - if let Err(pid_file_err) = fs::remove_file(&args.pidfile) { - eprintln!("Failed to remove pidfile: {}", pid_file_err); + handler_dns.await.unwrap(); + } } - res.map_err(|e| e.into()) + Ok(()) } diff --git a/linkup-cli/src/commands/start.rs b/linkup-cli/src/commands/start.rs index 0e5ba7ee..55296de5 100644 --- a/linkup-cli/src/commands/start.rs +++ b/linkup-cli/src/commands/start.rs @@ -8,6 +8,7 @@ use std::{ time::Duration, }; +use anyhow::{anyhow, Context, Error}; use colored::Colorize; use crossterm::{cursor, ExecutableCommand}; @@ -17,7 +18,7 @@ use crate::{ local_config::{config_path, config_to_state, get_config}, services::{self, BackgroundService}, }; -use crate::{local_config::LocalState, CliError}; +use crate::{local_config::LocalState, Result}; const LOADING_CHARS: [char; 10] = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']; @@ -31,11 +32,7 @@ pub struct Args { pub no_tunnel: bool, } -pub async fn start( - args: &Args, - fresh_state: bool, - config_arg: &Option, -) -> Result<(), CliError> { +pub async fn start(args: &Args, fresh_state: bool, config_arg: &Option) -> Result<()> { let mut state = if fresh_state { let state = load_and_save_state(config_arg, args.no_tunnel, true)?; set_linkup_env(state.clone())?; @@ -49,27 +46,8 @@ pub async fn start( let local_server = services::LocalServer::new(); let cloudflare_tunnel = services::CloudflareTunnel::new(); - let caddy = services::Caddy::new(); - let dnsmasq = services::Dnsmasq::new(); - - #[cfg(target_os = "linux")] - { - use crate::{is_sudo, sudo_su}; - match (caddy.should_start(&state.domain_strings()), is_sudo()) { - // Should start Caddy and is not sudo - (Ok(true), false) => { - println!( - "On linux binding port 443 and 80 requires sudo. And this is necessary to start caddy." - ); - - sudo_su()?; - } - // Should not start Caddy or should start Caddy but is already sudo - (Ok(false), _) | (Ok(true), true) => (), - // Can't check if should start Caddy - (Err(error), _) => log::error!("Failed to check if should start Caddy: {}", error), - } - } + #[cfg(target_os = "macos")] + let local_dns_server = services::LocalDnsServer::new(); let mut display_thread: Option> = None; let display_channel = sync::mpsc::channel::(); @@ -82,8 +60,8 @@ pub async fn start( &[ services::LocalServer::NAME, services::CloudflareTunnel::NAME, - services::Caddy::NAME, - services::Dnsmasq::NAME, + #[cfg(target_os = "macos")] + services::LocalDnsServer::NAME, ], status_update_channel.1, display_channel.1, @@ -93,14 +71,14 @@ pub async fn start( // To make sure that we get the last update to the display thread before the error is bubbled up, // we store any error that might happen on one of the steps and only return it after we have // send the message to the display thread to stop and we join it. 
- let mut exit_error: Option> = None; + let mut exit_error: Option = None; match local_server .run_with_progress(&mut state, status_update_channel.0.clone()) .await { Ok(_) => (), - Err(err) => exit_error = Some(Box::new(err)), + Err(err) => exit_error = Some(err), } if exit_error.is_none() { @@ -109,27 +87,20 @@ pub async fn start( .await { Ok(_) => (), - Err(err) => exit_error = Some(Box::new(err)), + Err(err) => exit_error = Some(err), } } - if exit_error.is_none() { - match caddy - .run_with_progress(&mut state, status_update_channel.0.clone()) - .await - { - Ok(_) => (), - Err(err) => exit_error = Some(Box::new(err)), - } - } - - if exit_error.is_none() { - match dnsmasq - .run_with_progress(&mut state, status_update_channel.0.clone()) - .await - { - Ok(_) => (), - Err(err) => exit_error = Some(Box::new(err)), + #[cfg(target_os = "macos")] + { + if exit_error.is_none() { + match local_dns_server + .run_with_progress(&mut state, status_update_channel.0.clone()) + .await + { + Ok(_) => (), + Err(err) => exit_error = Some(err), + } } } @@ -139,7 +110,7 @@ pub async fn start( } if let Some(exit_error) = exit_error { - return Err(CliError::StartErr(exit_error.to_string())); + return Err(exit_error).context("Failed to start CLI"); } let status = SessionStatus { @@ -251,7 +222,7 @@ fn spawn_display_thread( }) } -fn set_linkup_env(state: LocalState) -> Result<(), CliError> { +fn set_linkup_env(state: LocalState) -> Result<()> { // Set env vars to linkup for service in &state.services { if let Some(d) = &service.directory { @@ -266,7 +237,7 @@ fn load_and_save_state( config_arg: &Option, no_tunnel: bool, is_paid: bool, -) -> Result { +) -> Result { let previous_state = LocalState::load(); let config_path = config_path(config_arg)?; let input_config = get_config(&config_path)?; @@ -288,35 +259,24 @@ fn load_and_save_state( Ok(state) } -fn set_service_env(directory: String, config_path: String) -> Result<(), CliError> { - let config_dir = Path::new(&config_path).parent().ok_or_else(|| { - CliError::SetServiceEnv( - directory.clone(), - "config_path does not have a parent directory".to_string(), - ) - })?; +fn set_service_env(directory: String, config_path: String) -> Result<()> { + let config_dir = Path::new(&config_path) + .parent() + .with_context(|| format!("config_path '{directory}' does not have a parent directory"))?; let service_path = PathBuf::from(config_dir).join(&directory); - let dev_env_files_result = fs::read_dir(service_path); - let dev_env_files: Vec<_> = match dev_env_files_result { - Ok(entries) => entries - .filter_map(Result::ok) - .filter(|entry| { - entry.file_name().to_string_lossy().ends_with(".linkup") - && entry.file_name().to_string_lossy().starts_with(".env.") - }) - .collect(), - Err(e) => { - return Err(CliError::SetServiceEnv( - directory.clone(), - format!("Failed to read directory: {}", e), - )) - } - }; + let dev_env_files: Vec<_> = fs::read_dir(&service_path) + .with_context(|| format!("Failed to read service directory {:?}", &service_path))? 
+ .filter_map(Result::ok) + .filter(|entry| { + entry.file_name().to_string_lossy().ends_with(".linkup") + && entry.file_name().to_string_lossy().starts_with(".env.") + }) + .collect(); if dev_env_files.is_empty() { - return Err(CliError::NoDevEnv(directory)); + return Err(anyhow!("No dev env files found on {:?}", directory)); } for dev_env_file in dev_env_files { diff --git a/linkup-cli/src/commands/status.rs b/linkup-cli/src/commands/status.rs index 64655fbc..1740e3de 100644 --- a/linkup-cli/src/commands/status.rs +++ b/linkup-cli/src/commands/status.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use colored::{ColoredString, Colorize}; use crossterm::{cursor, execute, style::Print, terminal}; use linkup::{get_additional_headers, HeaderMap, StorableDomain, TargetService}; @@ -9,11 +10,10 @@ use std::{ thread::{self, sleep}, time::Duration, }; -use url::Url; use crate::{ local_config::{LocalService, LocalState, ServiceTarget}, - CliError, LINKUP_LOCALSERVER_PORT, + services, Result, }; const LOADING_CHARS: [char; 10] = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']; @@ -30,7 +30,7 @@ pub struct Args { all: bool, } -pub fn status(args: &Args) -> Result<(), CliError> { +pub fn status(args: &Args) -> anyhow::Result<()> { // TODO(augustocesar)[2024-10-28]: Remove --all/-a in a future release. // Do not print the warning in case of JSON so it doesn't break any usage if the result of the command // is passed on to somewhere else. @@ -40,7 +40,7 @@ pub fn status(args: &Args) -> Result<(), CliError> { println!("{}", warning.yellow()); } - let state = LocalState::load()?; + let state = LocalState::load().context("Failed to load local state")?; let linkup_services = linkup_services(&state); let all_services = state.services.into_iter().chain(linkup_services); @@ -282,7 +282,7 @@ pub fn format_state_domains(session_name: &str, domains: &[StorableDomain]) -> V } fn linkup_services(state: &LocalState) -> Vec { - let local_url = Url::parse(&format!("http://localhost:{}", LINKUP_LOCALSERVER_PORT)).unwrap(); + let local_url = services::LocalServer::url(); vec![ LocalService { diff --git a/linkup-cli/src/commands/stop.rs b/linkup-cli/src/commands/stop.rs index 7c689b26..87520b97 100644 --- a/linkup-cli/src/commands/stop.rs +++ b/linkup-cli/src/commands/stop.rs @@ -1,14 +1,17 @@ use std::fs::{self}; use std::path::{Path, PathBuf}; +use anyhow::Context; + use crate::env_files::clear_env_file; use crate::local_config::LocalState; -use crate::{services, CliError}; +use crate::services::{stop_service, BackgroundService}; +use crate::{services, Result}; #[derive(clap::Args)] pub struct Args {} -pub fn stop(_args: &Args, clear_env: bool) -> Result<(), CliError> { +pub fn stop(_args: &Args, clear_env: bool) -> Result<()> { match (LocalState::load(), clear_env) { (Ok(state), true) => { // Reset env vars back to what they were before @@ -29,39 +32,28 @@ pub fn stop(_args: &Args, clear_env: bool) -> Result<(), CliError> { } } - services::LocalServer::new().stop(); - services::CloudflareTunnel::new().stop(); - services::Caddy::new().stop(); - services::Dnsmasq::new().stop(); + stop_service(services::LocalServer::ID); + stop_service(services::CloudflareTunnel::ID); + #[cfg(target_os = "macos")] + stop_service(services::LocalDnsServer::ID); println!("Stopped linkup"); Ok(()) } -fn remove_service_env(directory: String, config_path: String) -> Result<(), CliError> { - let config_dir = Path::new(&config_path).parent().ok_or_else(|| { - CliError::SetServiceEnv( - directory.clone(), - "config_path does not have a parent 
directory".to_string(), - ) - })?; +fn remove_service_env(directory: String, config_path: String) -> Result<()> { + let config_dir = Path::new(&config_path) + .parent() + .with_context(|| format!("config_path '{directory}' does not have a parent directory"))?; let service_path = PathBuf::from(config_dir).join(&directory); - let env_files_result = fs::read_dir(service_path); - let env_files: Vec<_> = match env_files_result { - Ok(entries) => entries - .filter_map(Result::ok) - .filter(|entry| entry.file_name().to_string_lossy().starts_with(".env")) - .collect(), - Err(e) => { - return Err(CliError::SetServiceEnv( - directory.clone(), - format!("Failed to read directory: {}", e), - )) - } - }; + let env_files: Vec<_> = fs::read_dir(&service_path) + .with_context(|| format!("Failed to read service directory {:?}", &service_path))? + .filter_map(Result::ok) + .filter(|entry| entry.file_name().to_string_lossy().starts_with(".env")) + .collect(); for env_file in env_files { let env_path = env_file.path(); diff --git a/linkup-cli/src/commands/uninstall.rs b/linkup-cli/src/commands/uninstall.rs index 6451f379..60152704 100644 --- a/linkup-cli/src/commands/uninstall.rs +++ b/linkup-cli/src/commands/uninstall.rs @@ -1,11 +1,12 @@ use std::{fs, process}; -use crate::{commands, linkup_dir_path, linkup_exe_path, prompt, CliError, InstallationMethod}; +use crate::{commands, linkup_dir_path, linkup_exe_path, prompt, InstallationMethod, Result}; #[derive(clap::Args)] pub struct Args {} -pub fn uninstall(_args: &Args) -> Result<(), CliError> { +#[cfg_attr(not(target_os = "macos"), allow(unused_variables))] +pub async fn uninstall(_args: &Args, config_arg: &Option) -> Result<()> { let response = prompt("Are you sure you want to uninstall linkup? [y/N]: ") .trim() .to_lowercase(); @@ -18,10 +19,25 @@ pub fn uninstall(_args: &Args) -> Result<(), CliError> { commands::stop(&commands::StopArgs {}, true)?; - let exe_path = linkup_exe_path(); + #[cfg(target_os = "macos")] + { + use crate::{ + commands::local_dns, + local_config::{self, LocalState}, + }; + + if local_dns::is_installed(&local_config::managed_domains( + LocalState::load().ok().as_ref(), + config_arg, + )) { + local_dns::uninstall(config_arg).await?; + } + } + + let exe_path = linkup_exe_path()?; log::debug!("Linkup exe path: {:?}", &exe_path); - match InstallationMethod::current() { + match InstallationMethod::current()? { InstallationMethod::Brew => { log::debug!("Uninstalling linkup from Homebrew"); @@ -52,7 +68,12 @@ pub fn uninstall(_args: &Args) -> Result<(), CliError> { let linkup_dir = linkup_dir_path(); log::debug!("Removing linkup folder: {}", linkup_dir.display()); - fs::remove_dir_all(linkup_dir)?; + if let Err(error) = fs::remove_dir_all(linkup_dir) { + match error.kind() { + std::io::ErrorKind::NotFound => (), + _ => return Err(error.into()), + } + } println!("linkup uninstalled!"); diff --git a/linkup-cli/src/commands/update.rs b/linkup-cli/src/commands/update.rs index 77acc184..c8607a5d 100644 --- a/linkup-cli/src/commands/update.rs +++ b/linkup-cli/src/commands/update.rs @@ -1,27 +1,46 @@ -use crate::{ - current_version, linkup_bin_dir_path, linkup_exe_path, release, CliError, InstallationMethod, -}; -use std::{fs, path::PathBuf}; +use crate::{current_version, linkup_exe_path, release, InstallationMethod, Result}; +use std::fs; #[derive(clap::Args)] pub struct Args { /// Ignore the cached last version and check remote server again for the latest version. #[arg(long)] skip_cache: bool, + + /// Which channel to update to/with. 
+ #[arg(long)] + channel: Option, } -pub async fn update(args: &Args) -> Result<(), CliError> { +#[derive(Clone, clap::ValueEnum)] +enum DesiredChannel { + Stable, + Beta, +} + +impl From<&DesiredChannel> for linkup::VersionChannel { + fn from(value: &DesiredChannel) -> Self { + match value { + DesiredChannel::Stable => linkup::VersionChannel::Stable, + DesiredChannel::Beta => linkup::VersionChannel::Beta, + } + } +} + +pub async fn update(args: &Args) -> Result<()> { if args.skip_cache { log::debug!("Clearing cache to force a new check for the latest version."); release::clear_cache(); } - match release::available_update(&current_version()).await { + let requested_channel = args.channel.as_ref().map(linkup::VersionChannel::from); + + match release::available_update(&current_version(), requested_channel).await { Some(update) => { let new_linkup_path = update.linkup.download_decompressed("linkup").await.unwrap(); - let current_linkup_path = linkup_exe_path(); + let current_linkup_path = linkup_exe_path()?; let bkp_linkup_path = current_linkup_path.with_extension("bkp"); fs::rename(&current_linkup_path, &bkp_linkup_path) @@ -29,16 +48,6 @@ pub async fn update(args: &Args) -> Result<(), CliError> { fs::rename(&new_linkup_path, &current_linkup_path) .expect("failed to move the new exe as the current exe"); - let new_caddy_path = update.caddy.download_decompressed("caddy").await.unwrap(); - - let current_caddy_path = get_caddy_path(); - let bkp_caddy_path = current_caddy_path.with_extension("bkp"); - - fs::rename(&current_caddy_path, &bkp_caddy_path) - .expect("failed to move the current exe into a backup"); - fs::rename(&new_caddy_path, &current_caddy_path) - .expect("failed to move the new exe as the current exe"); - println!("Finished update!"); } None => { @@ -50,21 +59,14 @@ pub async fn update(args: &Args) -> Result<(), CliError> { } pub async fn new_version_available() -> bool { - release::available_update(&current_version()) + release::available_update(&current_version(), None) .await .is_some() } -pub fn update_command() -> String { - match InstallationMethod::current() { - InstallationMethod::Brew => "brew upgrade linkup".to_string(), - InstallationMethod::Manual | InstallationMethod::Cargo => "linkup update".to_string(), +pub fn update_command() -> Result { + match InstallationMethod::current()?
{ + InstallationMethod::Brew => Ok("brew upgrade linkup".to_string()), + InstallationMethod::Manual | InstallationMethod::Cargo => Ok("linkup update".to_string()), } } - -fn get_caddy_path() -> PathBuf { - let mut path = linkup_bin_dir_path(); - path.push("caddy"); - - path -} diff --git a/linkup-cli/src/env_files.rs b/linkup-cli/src/env_files.rs index 6940e658..046ca17b 100644 --- a/linkup-cli/src/env_files.rs +++ b/linkup-cli/src/env_files.rs @@ -4,7 +4,9 @@ use std::{ path::PathBuf, }; -use crate::{CliError, Result}; +use anyhow::Context; + +use crate::Result; const LINKUP_ENV_SEPARATOR: &str = "##### Linkup environment - DO NOT EDIT #####"; @@ -15,11 +17,8 @@ pub fn write_to_env_file(service: &str, dev_env_path: &PathBuf, env_path: &PathB } } - let mut dev_env_content = fs::read_to_string(dev_env_path).map_err(|e| { - CliError::SetServiceEnv( - service.to_string(), - format!("could not read dev env file: {}", e), - ) + let mut dev_env_content = fs::read_to_string(dev_env_path).with_context(|| { + format!("Failed to read service '{service}' dev env file {dev_env_path:?}") })?; if dev_env_content.ends_with('\n') { @@ -30,12 +29,7 @@ pub fn write_to_env_file(service: &str, dev_env_path: &PathBuf, env_path: &PathB .create(true) .append(true) .open(env_path) - .map_err(|e| { - CliError::SetServiceEnv( - service.to_string(), - format!("Failed to open .env file: {}", e), - ) - })?; + .with_context(|| "Failed to open service '{service}' env file {env_path:?}")?; let content = [ format!("\n{}", LINKUP_ENV_SEPARATOR), @@ -43,23 +37,15 @@ pub fn write_to_env_file(service: &str, dev_env_path: &PathBuf, env_path: &PathB format!("\n{}", LINKUP_ENV_SEPARATOR), ]; - writeln!(env_file, "{}", content.concat()).map_err(|e| { - CliError::SetServiceEnv( - service.to_string(), - format!("could not write to env file: {}", e), - ) - })?; + writeln!(env_file, "{}", content.concat()) + .with_context(|| format!("Failed to write to service '{service}' env file {env_path:?}"))?; Ok(()) } pub fn clear_env_file(service: &str, env_path: &PathBuf) -> Result<()> { - let mut file_content = fs::read_to_string(env_path).map_err(|e| { - CliError::RemoveServiceEnv( - service.to_string(), - format!("could not read dev env file: {}", e), - ) - })?; + let mut file_content = fs::read_to_string(env_path) + .with_context(|| "Failed to read service '{service}' env file {env_path:?}")?; if let (Some(mut linkup_block_start), Some(mut linkup_block_end)) = ( file_content.find(LINKUP_ENV_SEPARATOR), @@ -87,17 +73,10 @@ pub fn clear_env_file(service: &str, env_path: &PathBuf) -> Result<()> { .write(true) .truncate(true) .open(env_path) - .map_err(|e| { - CliError::RemoveServiceEnv( - service.to_string(), - format!("Failed to open .env file for writing: {}", e), - ) - })?; - file.write_all(file_content.as_bytes()).map_err(|e| { - CliError::RemoveServiceEnv( - service.to_string(), - format!("Failed to write .env file: {}", e), - ) + .with_context(|| "Failed to open service '{service}' env file {env_path:?}")?; + + file.write_all(file_content.as_bytes()).with_context(|| { + format!("Failed to write to service '{service}' env file {env_path:?}") })?; } diff --git a/linkup-cli/src/local_config.rs b/linkup-cli/src/local_config.rs index 1e0b8c28..705788f4 100644 --- a/linkup-cli/src/local_config.rs +++ b/linkup-cli/src/local_config.rs @@ -4,6 +4,7 @@ use std::{ fs, }; +use anyhow::Context; use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; use url::Url; @@ -16,7 +17,7 @@ use linkup::{ use crate::{ 
linkup_file_path, services, worker_client::{self, WorkerClient}, - CliError, LINKUP_CONFIG_ENV, LINKUP_STATE_FILE, + Result, LINKUP_CONFIG_ENV, LINKUP_STATE_FILE, }; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] @@ -27,41 +28,26 @@ pub struct LocalState { } impl LocalState { - pub fn load() -> Result { - if let Err(e) = fs::File::open(linkup_file_path(LINKUP_STATE_FILE)) { - return Err(CliError::NoState(e.to_string())); - } - - let content = match fs::read_to_string(linkup_file_path(LINKUP_STATE_FILE)) { - Ok(content) => content, - Err(e) => return Err(CliError::NoState(e.to_string())), - }; + pub fn load() -> anyhow::Result { + let state_file_path = linkup_file_path(LINKUP_STATE_FILE); + let content = fs::read_to_string(&state_file_path) + .with_context(|| format!("Failed to read state file on {:?}", &state_file_path))?; - match serde_yaml::from_str(&content) { - Ok(config) => Ok(config), - Err(e) => Err(CliError::NoState(e.to_string())), - } + serde_yaml::from_str(&content).context("Failed to parse state file") } - pub fn save(&mut self) -> Result<(), CliError> { + pub fn save(&mut self) -> Result<()> { if cfg!(test) { return Ok(()); } - let yaml_string = match serde_yaml::to_string(self) { - Ok(yaml) => yaml, - Err(_) => { - return Err(CliError::SaveState( - "Failed to serialize the state into YAML".to_string(), - )) - } - }; - if fs::write(linkup_file_path(LINKUP_STATE_FILE), yaml_string).is_err() { - return Err(CliError::SaveState(format!( - "Failed to write the state file at {}", - linkup_file_path(LINKUP_STATE_FILE).display() - ))); - } + let yaml_string = + serde_yaml::to_string(self).context("Failed to serialize the state into YAML")?; + + let state_file_location = linkup_file_path(LINKUP_STATE_FILE); + fs::write(&state_file_location, yaml_string).with_context(|| { + format!("Failed to write the state file to {state_file_location:?}") + })?; Ok(()) } @@ -81,6 +67,7 @@ impl LocalState { } } + #[cfg_attr(not(target_os = "macos"), allow(dead_code))] pub fn domain_strings(&self) -> Vec { self.domains .iter() @@ -134,19 +121,6 @@ pub struct YamlLocalConfig { } impl YamlLocalConfig { - pub fn top_level_domains(&self) -> Vec { - self.domains - .iter() - .filter(|&d| { - !self - .domains - .iter() - .any(|other| other.domain != d.domain && d.domain.ends_with(&other.domain)) - }) - .map(|d| d.domain.clone()) - .collect::>() - } - pub fn create_preview_request(&self, services: &[(String, String)]) -> CreatePreviewRequest { let services = self .services @@ -249,55 +223,38 @@ pub fn config_to_state( } } -pub fn config_path(config_arg: &Option) -> Result { +pub fn config_path(config_arg: &Option) -> Result { match config_arg { Some(path) => { let absolute_path = fs::canonicalize(path) - .map_err(|_| CliError::NoConfig("Unable to resolve absolute path".to_string()))?; + .with_context(|| format!("Unable to resolve absolute path for {path:?}"))?; + Ok(absolute_path.to_string_lossy().into_owned()) } - None => match env::var(LINKUP_CONFIG_ENV) { - Ok(val) => { - let absolute_path = fs::canonicalize(val).map_err(|_| { - CliError::NoConfig("Unable to resolve absolute path".to_string()) - })?; - Ok(absolute_path.to_string_lossy().into_owned()) - } - Err(_) => Err(CliError::NoConfig( - "No config argument provided and LINKUP_CONFIG environment variable not set" - .to_string(), - )), - }, - } -} + None => { + let path = env::var(LINKUP_CONFIG_ENV).context( + "No config argument provided and LINKUP_CONFIG environment variable not set", + )?; -pub fn get_config(config_path: &str) -> Result { 
- let content = match fs::read_to_string(config_path) { - Ok(content) => content, - Err(_) => { - return Err(CliError::BadConfig(format!( - "Failed to read the config file at {}", - config_path - ))) - } - }; + let absolute_path = fs::canonicalize(&path) + .with_context(|| format!("Unable to resolve absolute path for {path:?}"))? - let yaml_config: YamlLocalConfig = match serde_yaml::from_str(&content) { - Ok(config) => config, - Err(_) => { - return Err(CliError::BadConfig(format!( - "Failed to deserialize the config file at {}", - config_path - ))) + Ok(absolute_path.to_string_lossy().into_owned()) } - }; + } +} + +pub fn get_config(config_path: &str) -> Result { + let content = fs::read_to_string(config_path) + .with_context(|| format!("Failed to read config file {config_path:?}"))?; - Ok(yaml_config) + serde_yaml::from_str(&content) + .with_context(|| format!("Failed to deserialize config file {config_path:?}")) } // This method gets the local state and uploads it to both the local linkup server and // the remote linkup server (worker). -pub async fn upload_state(state: &LocalState) -> Result { +pub async fn upload_state(state: &LocalState) -> Result { let local_url = services::LocalServer::url(); let server_config = ServerConfig::from(state); @@ -326,7 +283,7 @@ pub async fn upload_state(state: &LocalState) -> Result for ServerConfig { } } +#[cfg(target_os = "macos")] +pub fn managed_domains(state: Option<&LocalState>, cfg_path: &Option) -> Vec { + let config_domains = match config_path(cfg_path).ok() { + Some(cfg_path) => match get_config(&cfg_path) { + Ok(config) => Some( + config + .domains + .iter() + .map(|storable_domain| storable_domain.domain.clone()) + .collect::>(), + ), + Err(_) => None, + }, + None => None, + }; + + let state_domains = state.map(|state| state.domain_strings()); + + let mut domain_set = std::collections::HashSet::new(); + + if let Some(domains) = config_domains { + domain_set.extend(domains); + } + + if let Some(domains) = state_domains { + domain_set.extend(domains); + } + + domain_set.into_iter().collect() +} + +#[cfg(target_os = "macos")] +pub fn top_level_domains(domains: &[String]) -> Vec { + domains + .iter() + .filter(|&domain| { + !domains + .iter() + .any(|other_domain| other_domain != domain && domain.ends_with(other_domain)) + }) + .cloned() + .collect::>() +} + #[cfg(test)] mod tests { use super::*; diff --git a/linkup-cli/src/main.rs b/linkup-cli/src/main.rs index 0e1b1cf8..0c4a86ea 100644 --- a/linkup-cli/src/main.rs +++ b/linkup-cli/src/main.rs @@ -1,9 +1,11 @@ -use std::{env, fs, io::ErrorKind, path::PathBuf, process}; +use std::{env, fs, io::ErrorKind, path::PathBuf}; +use anyhow::{anyhow, Context}; use clap::{Parser, Subcommand}; use colored::Colorize; use thiserror::Error; +pub use anyhow::Result; pub use linkup::Version; mod commands; @@ -15,7 +17,6 @@ mod worker_client; const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION"); const LINKUP_CONFIG_ENV: &str = "LINKUP_CONFIG"; -const LINKUP_LOCALSERVER_PORT: u16 = 9066; const LINKUP_DIR: &str = ".linkup"; const LINKUP_STATE_FILE: &str = "state"; @@ -26,22 +27,22 @@ pub enum InstallationMethod { } impl InstallationMethod { - fn current() -> Self { - for component in linkup_exe_path().components() { + fn current() -> Result { + for component in linkup_exe_path()?.components() { if component.as_os_str() == "Cellar" { - return Self::Brew; + return Ok(Self::Brew); } else if component.as_os_str() == ".cargo" { - return Self::Cargo; + return Ok(Self::Cargo); } } - Self::Manual + Ok(Self::Manual) } } -pub
fn linkup_exe_path() -> PathBuf { - fs::canonicalize(std::env::current_exe().expect("current exe to be accessible")) - .expect("exe path to be valid") +pub fn linkup_exe_path() -> Result { + fs::canonicalize(std::env::current_exe().context("Failed to get the current executable")?) + .context("Failed to canonicalize the executable path") } pub fn linkup_dir_path() -> PathBuf { @@ -62,6 +63,12 @@ pub fn linkup_bin_dir_path() -> PathBuf { path } +pub fn linkup_certs_dir_path() -> PathBuf { + let mut path = linkup_dir_path(); + path.push("certs"); + path +} + pub fn linkup_file_path(file: &str) -> PathBuf { let mut path = linkup_dir_path(); path.push(file); @@ -75,11 +82,11 @@ fn ensure_linkup_dir() -> Result<()> { Ok(_) => Ok(()), Err(e) => match e.kind() { ErrorKind::AlreadyExists => Ok(()), - _ => Err(CliError::BadConfig(format!( + _ => Err(anyhow!( "Could not create linkup dir at {}: {}", path.display(), e - ))), + )), }, } } @@ -89,11 +96,12 @@ fn current_version() -> Version { .expect("current version on CARGO_PKG_VERSION should be a valid version") } +#[cfg(target_os = "macos")] fn is_sudo() -> bool { - let sudo_check = process::Command::new("sudo") + let sudo_check = std::process::Command::new("sudo") .arg("-n") - .stdout(process::Stdio::null()) - .stderr(process::Stdio::null()) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) .arg("true") .status(); @@ -104,16 +112,17 @@ fn is_sudo() -> bool { false } +#[cfg(target_os = "macos")] fn sudo_su() -> Result<()> { - let status = process::Command::new("sudo") + let status = std::process::Command::new("sudo") .arg("su") - .stdin(process::Stdio::null()) - .stdout(process::Stdio::null()) - .stderr(process::Stdio::null()) + .stdin(std::process::Stdio::null()) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) .status()?; if !status.success() { - return Err(CliError::StartErr("failed to sudo".to_string())); + return Err(anyhow!("Failed to sudo")); } Ok(()) @@ -131,70 +140,6 @@ fn prompt(question: &str) -> String { input } -pub type Result = std::result::Result; - -#[derive(Error, Debug)] -pub enum CliError { - #[error("no valid state file: {0}")] - NoState(String), - #[error("there was a problem with the provided config: {0}")] - BadConfig(String), - #[error("no valid config file provided: {0}")] - NoConfig(String), - #[error("a service directory was provided that contained no .env.*.linkup file: {0}")] - NoDevEnv(String), - #[error("couldn't set env for service {0}: {1}")] - SetServiceEnv(String, String), - #[error("couldn't remove env for service {0}: {1}")] - RemoveServiceEnv(String, String), - #[error("could not save statefile: {0}")] - SaveState(String), - #[error("could not start local server: {0}")] - StartLocalServer(String), - #[error("could not start local tunnel: {0}")] - StartLocalTunnel(String), - #[error("linkup component did not start in time: {0}")] - StartLinkupTimeout(String), - #[error("could not start Caddy: {0}")] - StartCaddy(String), - #[error("could not start DNSMasq: {0}")] - StartDNSMasq(String), - #[error("could not load config to {0}: {1}")] - LoadConfig(String, String), - #[error("could not start: {0}")] - StartErr(String), - #[error("could not stop: {0}")] - StopErr(String), - #[error("could not get status: {0}")] - StatusErr(String), - #[error("no such service: {0}")] - NoSuchService(String), - #[error("failed to install local dns: {0}")] - LocalDNSInstall(String), - #[error("failed to uninstall local dns: {0}")] - LocalDNSUninstall(String), - #[error("failed 
to write file: {0}")] - WriteFile(String), - #[error("failed to reboot dnsmasq: {0}")] - RebootDNSMasq(String), - #[error("--no-tunnel does not work without `local-dns`")] - NoTunnelWithoutLocalDns, - #[error("could not get env var: {0}")] - GetEnvVar(String), - #[error("HTTP error: {0}")] - HttpErr(String), - #[error("could not parse: {0}. {1}")] - ParseErr(String, String), - #[error("{0}: {1}")] - FileErr(String, String), - #[error("{0}")] - IOError(#[from] std::io::Error), - #[error("{0}")] - WorkerClientErr(#[from] worker_client::Error), - #[error("{0}")] - DeployErr(#[from] commands::deploy::DeployError), -} - #[derive(Error, Debug)] pub enum CheckErr { #[error("local server not started")] @@ -245,6 +190,7 @@ enum Commands { #[clap(about = "View linkup component and service status")] Status(commands::StatusArgs), + #[cfg(target_os = "macos")] #[clap(about = "Speed up your local environment by routing traffic locally when possible")] LocalDNS(commands::LocalDnsArgs), @@ -273,7 +219,7 @@ enum Commands { } #[tokio::main] -async fn main() -> Result<()> { +async fn main() -> anyhow::Result<()> { env_logger::init(); let cli = Cli::parse(); @@ -283,13 +229,22 @@ async fn main() -> Result<()> { if !matches!(cli.command, Commands::Update(_)) && commands::update::new_version_available().await { - let message = format!( - "⚠️ New version of linkup is available! Run `{}` to update it.", - commands::update::update_command() - ) - .yellow(); - - println!("{}", message); + match commands::update::update_command() { + Ok(update_command) => { + let message = format!( + "⚠️ New version of linkup is available! Run `{update_command}` to update it." + ) + .yellow(); + + println!("{}", message); + } + Err(error) => { + // TODO(augustoccesar)[2025-03-26]: This should probably be an error log, but for now since the logs + // are not behaving the way that we want them to, keep as a warning. Will revisit this once starts + // looking into tracing. 
+ log::warn!("Failed to resolve the update command to display to user: {error}"); + } + } } match &cli.command { @@ -300,13 +255,14 @@ async fn main() -> Result<()> { Commands::Local(args) => commands::local(args).await, Commands::Remote(args) => commands::remote(args).await, Commands::Status(args) => commands::status(args), + #[cfg(target_os = "macos")] Commands::LocalDNS(args) => commands::local_dns(args, &cli.config).await, Commands::Completion(args) => commands::completion(args), Commands::Preview(args) => commands::preview(args, &cli.config).await, Commands::Server(args) => commands::server(args).await, - Commands::Uninstall(args) => commands::uninstall(args), + Commands::Uninstall(args) => commands::uninstall(args, &cli.config).await, Commands::Update(args) => commands::update(args).await, - Commands::Deploy(args) => commands::deploy(args).await.map_err(CliError::from), - Commands::Destroy(args) => commands::destroy(args).await.map_err(CliError::from), + Commands::Deploy(args) => commands::deploy(args).await, + Commands::Destroy(args) => commands::destroy(args).await, } } diff --git a/linkup-cli/src/release.rs b/linkup-cli/src/release.rs index fabab276..69cbaff7 100644 --- a/linkup-cli/src/release.rs +++ b/linkup-cli/src/release.rs @@ -5,6 +5,7 @@ use std::{ }; use flate2::read::GzDecoder; +use linkup::VersionChannel; use reqwest::header::HeaderValue; use serde::{Deserialize, Serialize}; use tar::Archive; @@ -12,7 +13,8 @@ use url::Url; use crate::{linkup_file_path, Version}; -const CACHED_LATEST_RELEASE_FILE: &str = "latest_release.json"; +const CACHED_LATEST_STABLE_RELEASE_FILE: &str = "latest_release_stable.json"; +const CACHED_LATEST_BETA_RELEASE_FILE: &str = "latest_release_beta.json"; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -113,42 +115,6 @@ impl Release { asset } - - /// Examples of Caddy asset files: - /// - caddy-darwin-amd64.tar.gz - /// - caddy-darwin-arm64.tar.gz - /// - caddy-linux-amd64.tar.gz - /// - caddy-linux-arm64.tar.gz - pub fn caddy_asset(&self, os: &str, arch: &str) -> Option { - let lookup_os = match os { - "macos" => "darwin", - "linux" => "linux", - lookup_os => lookup_os, - }; - - let lookup_arch = match arch { - "x86_64" => "amd64", - "aarch64" => "arm64", - lookup_arch => lookup_arch, - }; - - let asset = self - .assets - .iter() - .find(|asset| asset.name == format!("caddy-{}-{}.tar.gz", lookup_os, lookup_arch)) - .cloned(); - - if asset.is_none() { - log::debug!( - "Caddy release for OS '{}' and ARCH '{}' not found on version {}", - lookup_os, - lookup_arch, - &self.version - ); - } - - asset - } } #[derive(Serialize, Deserialize)] @@ -159,18 +125,45 @@ struct CachedLatestRelease { pub struct Update { pub linkup: Asset, - pub caddy: Asset, } -pub async fn available_update(current_version: &Version) -> Option { +pub async fn available_update( + current_version: &Version, + desired_channel: Option, +) -> Option { let os = env::consts::OS; let arch = env::consts::ARCH; - let latest_release = match cached_latest_release().await { - Some(cached_latest_release) => cached_latest_release.release, + let channel = desired_channel.unwrap_or_else(|| current_version.channel()); + log::debug!("Looking for available update on '{channel}' channel."); + + let latest_release = match cached_latest_release(&channel).await { + Some(cached_latest_release) => { + let release = cached_latest_release.release; + + log::debug!("Found cached release: {}", release.version); + + release + } None => { - let release = match fetch_latest_release().await { - Ok(release) 
=> release, + log::debug!("No cached release found. Fetching from remote..."); + + let release = match channel { + linkup::VersionChannel::Stable => fetch_stable_release().await, + linkup::VersionChannel::Beta => fetch_beta_release().await, + }; + + let release = match release { + Ok(Some(release)) => { + log::debug!("Found release {} on channel '{channel}'.", release.version); + + release + } + Ok(None) => { + log::debug!("No release found on remote for channel '{channel}'"); + + return None; + } Err(error) => { log::error!("Failed to fetch the latest release: {}", error); @@ -178,7 +171,12 @@ pub async fn available_update(current_version: &Version) -> Option { } }; - match fs::File::create(linkup_file_path(CACHED_LATEST_RELEASE_FILE)) { + let cache_file = match channel { + VersionChannel::Stable => CACHED_LATEST_STABLE_RELEASE_FILE, + VersionChannel::Beta => CACHED_LATEST_BETA_RELEASE_FILE, + }; + + match fs::File::create(linkup_file_path(cache_file)) { Ok(new_file) => { let release_cache = CachedLatestRelease { time: now(), @@ -213,21 +211,21 @@ pub async fn available_update(current_version: &Version) -> Option { } }; - if current_version >= &latest_version { + // Only check the version if the channel is the same. + if current_version.channel() == latest_version.channel() && current_version >= &latest_version { + log::debug!("Current version ({current_version}) is newer than latest ({latest_version})."); + return None; } - let caddy = latest_release - .caddy_asset(os, arch) - .expect("Caddy asset to be present on a release"); let linkup = latest_release .linkup_asset(os, arch) .expect("Linkup asset to be present on a release"); - Some(Update { linkup, caddy }) + Some(Update { linkup }) } -async fn fetch_latest_release() -> Result { +async fn fetch_stable_release() -> Result, reqwest::Error> { let url: Url = "https://api.github.com/repos/mentimeter/linkup/releases/latest" .parse() .unwrap(); @@ -249,18 +247,15 @@ async fn fetch_latest_release() -> Result { .build() .unwrap(); - client.execute(req).await?.json().await -} + let release = client.execute(req).await?.json().await?; -pub async fn fetch_release(version: &Version) -> Result, reqwest::Error> { - let tag = version.to_string(); + Ok(Some(release)) +} - let url: Url = format!( - "https://api.github.com/repos/mentimeter/linkup/releases/tags/{}", - &tag - ) - .parse() - .unwrap(); +pub async fn fetch_beta_release() -> Result, reqwest::Error> { + let url: Url = "https://api.github.com/repos/mentimeter/linkup/releases" + .parse() + .unwrap(); let mut req = reqwest::Request::new(reqwest::Method::GET, url); let headers = req.headers_mut(); @@ -279,11 +274,22 @@ pub async fn fetch_release(version: &Version) -> Result, reqwest .build() .unwrap(); - client.execute(req).await?.json().await + let releases: Vec = client.execute(req).await?.json().await?; + + let beta_release = releases + .into_iter() + .find(|release| release.version.starts_with("0.0.0-next-")); + + Ok(beta_release) } -async fn cached_latest_release() -> Option { - let path = linkup_file_path(CACHED_LATEST_RELEASE_FILE); +async fn cached_latest_release(channel: &VersionChannel) -> Option { + let file = match channel { + VersionChannel::Stable => CACHED_LATEST_STABLE_RELEASE_FILE, + VersionChannel::Beta => CACHED_LATEST_STABLE_RELEASE_FILE, + }; + + let path = linkup_file_path(file); if !path.exists() { return None; } @@ -325,11 +331,14 @@ async fn cached_latest_release() -> Option { } pub fn clear_cache() { - let path = linkup_file_path(CACHED_LATEST_RELEASE_FILE); - - if 
path.exists() { - if let Err(error) = fs::remove_file(path) { - log::error!("Failed to delete latest release cache file: {}", error); + for path in [ + linkup_file_path(CACHED_LATEST_STABLE_RELEASE_FILE), + linkup_file_path(CACHED_LATEST_BETA_RELEASE_FILE), + ] { + if path.exists() { + if let Err(error) = fs::remove_file(&path) { + log::error!("Failed to delete release cache file {path:?}: {error}"); + } } } } diff --git a/linkup-cli/src/services/caddy.rs b/linkup-cli/src/services/caddy.rs deleted file mode 100644 index 0c223cc4..00000000 --- a/linkup-cli/src/services/caddy.rs +++ /dev/null @@ -1,314 +0,0 @@ -use std::{env, fs, path::PathBuf, process::Command}; - -use url::Url; - -use crate::{ - commands::local_dns, current_version, linkup_bin_dir_path, linkup_dir_path, linkup_file_path, - local_config::LocalState, release, Version, -}; - -use super::{ - get_running_pid, local_server::LINKUP_LOCAL_SERVER_PORT, stop_pid_file, BackgroundService, Pid, - PidError, Signal, -}; - -#[derive(thiserror::Error, Debug)] -pub enum Error { - #[error("Failed to start the Caddy service")] - Starting, - #[error("Failed while handing file: {0}")] - FileHandling(#[from] std::io::Error), - #[error("Failed to stop pid: {0}")] - StoppingPid(#[from] PidError), -} - -#[derive(thiserror::Error, Debug)] -pub enum InstallError { - #[error("Failed while handing file: {0}")] - FileHandling(#[from] std::io::Error), - #[error("Failed to fetch release information: {0}")] - FetchError(#[from] reqwest::Error), - #[error("Release not found for version {0}")] - ReleaseNotFound(Version), - #[error("Caddy asset not found on release for version {0}")] - AssetNotFound(Version), - #[error("Failed to download Caddy asset: {0}")] - AssetDownload(String), -} - -#[derive(thiserror::Error, Debug)] -pub enum UninstallError { - #[error("Failed while handing file: {0}")] - FileHandling(#[from] std::io::Error), -} - -pub struct Caddy { - caddyfile_path: PathBuf, - stdout_file_path: PathBuf, - stderr_file_path: PathBuf, - pidfile_path: PathBuf, -} - -impl Caddy { - pub fn new() -> Self { - Self { - caddyfile_path: linkup_file_path("Caddyfile"), - stdout_file_path: linkup_file_path("caddy-stdout"), - stderr_file_path: linkup_file_path("caddy-stderr"), - pidfile_path: linkup_file_path("caddy-pid"), - } - } - - pub async fn install() -> Result<(), InstallError> { - let bin_dir_path = linkup_bin_dir_path(); - fs::create_dir_all(&bin_dir_path)?; - - let mut caddy_path = bin_dir_path.clone(); - caddy_path.push("caddy"); - - if fs::exists(&caddy_path)? { - log::debug!( - "Caddy executable already exists on {}", - &bin_dir_path.display() - ); - return Ok(()); - } - - let version = current_version(); - match release::fetch_release(&version).await? 
{ - Some(release) => { - let os = env::consts::OS; - let arch = env::consts::ARCH; - - match release.caddy_asset(os, arch) { - Some(asset) => match asset.download_decompressed("caddy").await { - Ok(downloaded_caddy_path) => { - log::debug!( - "Moving downloaded Caddy file from {:?} to {:?}", - &downloaded_caddy_path, - &caddy_path - ); - - fs::copy(&downloaded_caddy_path, &caddy_path)?; - fs::remove_file(&downloaded_caddy_path)?; - } - Err(error) => return Err(InstallError::AssetDownload(error.to_string())), - }, - None => { - log::warn!( - "Failed to find Caddy asset on release for version {}", - &version - ); - - return Err(InstallError::AssetNotFound(version.clone())); - } - } - } - None => { - log::warn!("Failed to find release for version {}", &version); - - return Err(InstallError::ReleaseNotFound(version.clone())); - } - } - - Ok(()) - } - - pub async fn uninstall() -> Result<(), UninstallError> { - let mut path = linkup_bin_dir_path(); - path.push("caddy"); - - if !fs::exists(&path)? { - log::debug!("Caddy executable does not exist on {}", &path.display()); - - return Ok(()); - } - - fs::remove_file(&path)?; - - Ok(()) - } - - fn start(&self, worker_url: &Url, worker_token: &str, domains: &[String]) -> Result<(), Error> { - log::debug!("Starting {}", Self::NAME); - - let domains_and_subdomains: Vec = domains - .iter() - .map(|domain| format!("{domain}, *.{domain}")) - .collect(); - - self.write_caddyfile(worker_url, worker_token, &domains_and_subdomains)?; - - let stdout_file = fs::File::create(&self.stdout_file_path)?; - let stderr_file = fs::File::create(&self.stderr_file_path)?; - - #[cfg(target_os = "macos")] - let status = Command::new("./bin/caddy") - .current_dir(linkup_dir_path()) - .arg("start") - .arg("--pidfile") - .arg(&self.pidfile_path) - .stdout(stdout_file) - .stderr(stderr_file) - .status()?; - - #[cfg(target_os = "linux")] - let status = { - // To make sure that the local user is the owner of the pidfile and not root, - // we create it before running the caddy command. - let _ = fs::File::create(&self.pidfile_path)?; - - Command::new("sudo") - .current_dir(linkup_dir_path()) - .arg("./bin/caddy") - .arg("start") - .arg("--pidfile") - .arg(&self.pidfile_path) - .stdin(std::process::Stdio::null()) - .stdout(stdout_file) - .stderr(stderr_file) - .status()? 
- }; - - if !status.success() { - return Err(Error::Starting); - } - - Ok(()) - } - - pub fn stop(&self) { - log::debug!("Stopping {}", Self::NAME); - - stop_pid_file(&self.pidfile_path, Signal::Term); - } - - fn write_caddyfile( - &self, - worker_url: &Url, - worker_token: &str, - domains: &[String], - ) -> Result<(), Error> { - let worker_url_str = worker_url.as_str().trim_end_matches('/'); - let logfile_path = self.stdout_file_path.display(); - let domains_str = domains.join(", "); - - let caddy_template = format!( - " - {{ - http_port 80 - https_port 443 - log {{ - output file {logfile_path} - }} - storage linkup {{ - worker_url \"{worker_url_str}\" - token \"{worker_token}\" - }} - }} - - {domains_str} {{ - reverse_proxy localhost:{LINKUP_LOCAL_SERVER_PORT} - tls {{ - resolvers 1.1.1.1 - dns linkup {{ - worker_url \"{worker_url_str}\" - token \"{worker_token}\" - }} - }} - }} - ", - ); - - fs::write(&self.caddyfile_path, caddy_template)?; - - Ok(()) - } - - pub fn should_start(&self, domains: &[String]) -> Result { - if !is_installed() { - return Ok(false); - } - - let resolvers = local_dns::list_resolvers()?; - - Ok(domains.iter().any(|domain| resolvers.contains(domain))) - } - - pub fn running_pid(&self) -> Option { - get_running_pid(&self.pidfile_path) - } -} - -impl BackgroundService for Caddy { - const NAME: &str = "Caddy"; - - async fn run_with_progress( - &self, - state: &mut LocalState, - status_sender: std::sync::mpsc::Sender, - ) -> Result<(), Error> { - let domains = &state.domain_strings(); - - match self.should_start(domains) { - Ok(true) => (), - Ok(false) => { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Skipped, - "Local DNS not installed", - ); - - return Ok(()); - } - Err(err) => { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Skipped, - "Failed to read resolvers folder", - ); - - log::warn!("Failed to read resolvers folder: {}", err); - - return Ok(()); - } - } - - self.notify_update(&status_sender, super::RunStatus::Starting); - - if self.running_pid().is_some() { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Started, - "Was already running", - ); - - return Ok(()); - } - - if let Err(e) = self.start( - &state.linkup.worker_url, - &state.linkup.worker_token, - domains, - ) { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Error, - "Failed to start", - ); - - return Err(e); - } - - self.notify_update(&status_sender, super::RunStatus::Started); - - Ok(()) - } -} - -pub fn is_installed() -> bool { - let mut caddy_path = linkup_bin_dir_path(); - caddy_path.push("caddy"); - - caddy_path.exists() -} diff --git a/linkup-cli/src/services/cloudflare_tunnel.rs b/linkup-cli/src/services/cloudflare_tunnel.rs index cb6e4e54..f1619ac8 100644 --- a/linkup-cli/src/services/cloudflare_tunnel.rs +++ b/linkup-cli/src/services/cloudflare_tunnel.rs @@ -17,12 +17,9 @@ use serde::{Deserialize, Serialize}; use tokio::time::sleep; use url::Url; -use crate::{ - linkup_file_path, local_config::LocalState, worker_client::WorkerClient, - LINKUP_LOCALSERVER_PORT, -}; +use crate::{linkup_file_path, local_config::LocalState, worker_client::WorkerClient, Result}; -use super::{get_running_pid, stop_pid_file, BackgroundService, Pid, PidError, Signal}; +use super::{find_service_pid, BackgroundService, PidError}; #[derive(thiserror::Error, Debug)] #[allow(dead_code)] @@ -63,7 +60,7 @@ impl CloudflareTunnel { worker_url: &Url, worker_token: &str, linkup_session_name: &str, - ) -> Result { + ) -> 
Result { let stdout_file = File::create(&self.stdout_file_path)?; let stderr_file = File::create(&self.stderr_file_path)?; @@ -94,6 +91,7 @@ impl CloudflareTunnel { .stdout(stdout_file) .stderr(stderr_file) .stdin(Stdio::null()) + .env("LINKUP_SERVICE_ID", Self::ID) .args([ "tunnel", "--pidfile", @@ -107,16 +105,6 @@ impl CloudflareTunnel { Ok(tunnel_url) } - pub fn stop(&self) { - log::debug!("Stopping {}", Self::NAME); - - stop_pid_file(&self.pidfile_path, Signal::Interrupt); - } - - pub fn running_pid(&self) -> Option { - get_running_pid(&self.pidfile_path) - } - async fn dns_propagated(&self, tunnel_url: &Url) -> bool { let mut opts = ResolverOpts::default(); opts.cache_size = 0; // Disable caching @@ -142,7 +130,7 @@ impl CloudflareTunnel { false } - fn update_state(&self, tunnel_url: &Url, state: &mut LocalState) -> Result<(), Error> { + fn update_state(&self, tunnel_url: &Url, state: &mut LocalState) -> Result<()> { debug!("Adding tunnel url {} to the state", tunnel_url.as_str()); state.linkup.tunnel = Some(tunnel_url.clone()); @@ -154,14 +142,15 @@ impl CloudflareTunnel { } } -impl BackgroundService for CloudflareTunnel { +impl BackgroundService for CloudflareTunnel { + const ID: &str = "cloudflare-tunnel"; const NAME: &str = "Cloudflare Tunnel"; async fn run_with_progress( &self, state: &mut LocalState, status_sender: std::sync::mpsc::Sender, - ) -> Result<(), Error> { + ) -> Result<()> { if !state.should_use_tunnel() { self.notify_update_with_details( &status_sender, @@ -179,10 +168,10 @@ impl BackgroundService for CloudflareTunnel { "Empty session name", ); - return Err(Error::InvalidSessionName(state.linkup.session_name.clone())); + return Err(Error::InvalidSessionName(state.linkup.session_name.clone()).into()); } - if self.running_pid().is_some() { + if find_service_pid(Self::ID).is_some() { self.notify_update_with_details( &status_sender, super::RunStatus::Started, @@ -228,7 +217,7 @@ impl BackgroundService for CloudflareTunnel { "Failed to start tunnel", ); - return Err(Error::PidfileNotFound); + return Err(Error::PidfileNotFound.into()); } self.notify_update(&status_sender, super::RunStatus::Starting); @@ -262,7 +251,7 @@ impl BackgroundService for CloudflareTunnel { "Failed to propagate tunnel DNS", ); - return Err(Error::DNSNotPropagated); + return Err(Error::DNSNotPropagated.into()); } self.notify_update(&status_sender, super::RunStatus::Starting); @@ -351,7 +340,7 @@ fn create_config_yml(tunnel_id: &str) -> Result<(), Error> { let credentials_file_path_str = credentials_file_path.to_string_lossy().to_string(); let config = Config { - url: format!("http://localhost:{}", LINKUP_LOCALSERVER_PORT), + url: "http://localhost".to_string(), tunnel: tunnel_id.to_string(), credentials_file: credentials_file_path_str, }; @@ -362,3 +351,32 @@ fn create_config_yml(tunnel_id: &str) -> Result<(), Error> { Ok(()) } + +// Get the pid from a pidfile, but only return Some in case the pidfile is valid and the written pid on the file +// is running. 
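+// These helpers moved here from services/mod.rs: cloudflared is still launched with --pidfile, so its pid is read from that file.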
+fn get_running_pid(file_path: &Path) -> Option { + let pid = match get_pid(file_path) { + Ok(pid) => pid, + Err(_) => return None, + }; + + super::system().process(pid).map(|_| pid) +} + +fn get_pid(file_path: &Path) -> Result { + if let Err(e) = File::open(file_path) { + return Err(PidError::NoPidFile(e.to_string())); + } + + match fs::read_to_string(file_path) { + Ok(content) => { + let pid_u32 = content + .trim() + .parse::() + .map_err(|e| PidError::BadPidFile(e.to_string()))?; + + Ok(super::Pid::from_u32(pid_u32)) + } + Err(e) => Err(PidError::BadPidFile(e.to_string())), + } +} diff --git a/linkup-cli/src/services/dnsmasq.rs b/linkup-cli/src/services/dnsmasq.rs deleted file mode 100644 index 4c46544e..00000000 --- a/linkup-cli/src/services/dnsmasq.rs +++ /dev/null @@ -1,182 +0,0 @@ -use std::{ - fmt::Write, - fs, - path::PathBuf, - process::{Command, Stdio}, -}; - -use crate::{commands::local_dns, linkup_dir_path, linkup_file_path, local_config::LocalState}; - -use super::{caddy, get_running_pid, stop_pid_file, BackgroundService, Pid, PidError, Signal}; - -#[derive(thiserror::Error, Debug)] -pub enum Error { - #[error("Failed while handing file: {0}")] - FileHandling(#[from] std::io::Error), - #[error("Failed to stop pid: {0}")] - StoppingPid(#[from] PidError), -} - -pub struct Dnsmasq { - port: u16, - config_file_path: PathBuf, - log_file_path: PathBuf, - pid_file_path: PathBuf, -} - -impl Dnsmasq { - pub fn new() -> Self { - Self { - port: 8053, - config_file_path: linkup_file_path("dnsmasq-conf"), - log_file_path: linkup_file_path("dnsmasq-log"), - pid_file_path: linkup_file_path("dnsmasq-pid"), - } - } - - fn setup(&self, domains: &[String], linkup_session_name: &str) -> Result<(), Error> { - let local_domains_template = domains.iter().fold(String::new(), |mut acc, d| { - let _ = write!( - acc, - "address=/{0}.{1}/127.0.0.1\naddress=/{0}.{1}/::1\nlocal=/{0}.{1}/\n", - linkup_session_name, d, - ); - acc - }); - - let dnsmasq_template = format!( - "{} - -port={} -log-facility={} -pid-file={}\n", - local_domains_template, - self.port, - self.log_file_path.display(), - self.pid_file_path.display(), - ); - - fs::write(&self.config_file_path, dnsmasq_template)?; - - Ok(()) - } - - fn start(&self) -> Result<(), Error> { - log::debug!("Starting {}", Self::NAME); - - Command::new("dnsmasq") - .current_dir(linkup_dir_path()) - .arg("--log-queries") - .arg("-C") - .arg(&self.config_file_path) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .status()?; - - Ok(()) - } - - pub fn stop(&self) { - log::debug!("Stopping {}", Self::NAME); - - stop_pid_file(&self.pid_file_path, Signal::Term); - } - - pub fn running_pid(&self) -> Option { - get_running_pid(&self.pid_file_path) - } - - fn should_start(&self, domains: &[String]) -> Result { - if !caddy::is_installed() { - return Ok(false); - } - - let resolvers = local_dns::list_resolvers()?; - - Ok(domains.iter().any(|domain| resolvers.contains(domain))) - } -} - -impl BackgroundService for Dnsmasq { - const NAME: &str = "Dnsmasq"; - - async fn run_with_progress( - &self, - state: &mut LocalState, - status_sender: std::sync::mpsc::Sender, - ) -> Result<(), Error> { - let domains = &state.domain_strings(); - - match self.should_start(domains) { - Ok(true) => (), - Ok(false) => { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Skipped, - "Local DNS not installed", - ); - - return Ok(()); - } - Err(err) => { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Skipped, - "Failed to read resolvers folder", 
- ); - - log::warn!("Failed to read resolvers folder: {}", err); - - return Ok(()); - } - } - - self.notify_update(&status_sender, super::RunStatus::Starting); - - if self.running_pid().is_some() { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Started, - "Was already running", - ); - - return Ok(()); - } - - if let Err(e) = self.setup(domains, &state.linkup.session_name) { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Error, - "Failed to setup", - ); - - return Err(e); - } - - if let Err(e) = self.start() { - self.notify_update_with_details( - &status_sender, - super::RunStatus::Error, - "Failed to start", - ); - - return Err(e); - } - - self.notify_update(&status_sender, super::RunStatus::Started); - - Ok(()) - } -} - -pub fn is_installed() -> bool { - let res = Command::new("which") - .args(["dnsmasq"]) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .stdin(Stdio::null()) - .status() - .unwrap(); - - res.success() -} diff --git a/linkup-cli/src/services/local_dns_server.rs b/linkup-cli/src/services/local_dns_server.rs new file mode 100644 index 00000000..80638c08 --- /dev/null +++ b/linkup-cli/src/services/local_dns_server.rs @@ -0,0 +1,97 @@ +use std::{ + env, + fs::File, + os::unix::process::CommandExt, + path::PathBuf, + process::{self, Stdio}, +}; + +use anyhow::Context; + +use crate::{commands::local_dns, linkup_file_path, local_config::LocalState, Result}; + +use super::BackgroundService; + +pub struct LocalDnsServer { + stdout_file_path: PathBuf, + stderr_file_path: PathBuf, +} + +impl LocalDnsServer { + pub fn new() -> Self { + Self { + stdout_file_path: linkup_file_path("localdns-stdout"), + stderr_file_path: linkup_file_path("localdns-stderr"), + } + } + + fn start(&self, session_name: &str, domains: &[String]) -> Result<()> { + log::debug!("Starting {}", Self::NAME); + + let stdout_file = File::create(&self.stdout_file_path)?; + let stderr_file = File::create(&self.stderr_file_path)?; + + let mut command = process::Command::new( + env::current_exe().context("Failed to get the current executable")?, + ); + command.env("RUST_LOG", "debug"); + command.env("LINKUP_SERVICE_ID", Self::ID); + command.args([ + "server", + "dns", + "--session-name", + session_name, + "--domains", + &domains.join(","), + ]); + + command + .process_group(0) + .stdout(stdout_file) + .stderr(stderr_file) + .stdin(Stdio::null()) + .spawn()?; + + Ok(()) + } +} + +impl BackgroundService for LocalDnsServer { + const ID: &str = "linkup-local-dns-server"; + const NAME: &str = "Local DNS server"; + + async fn run_with_progress( + &self, + state: &mut LocalState, + status_sender: std::sync::mpsc::Sender, + ) -> Result<()> { + self.notify_update(&status_sender, super::RunStatus::Starting); + + let session_name = state.linkup.session_name.clone(); + let domains = state.domain_strings(); + + if !local_dns::is_installed(&domains) { + self.notify_update_with_details( + &status_sender, + super::RunStatus::Skipped, + "Not installed", + ); + + return Ok(()); + } + + if let Err(e) = self.start(&session_name, &domains) { + self.notify_update_with_details( + &status_sender, + super::RunStatus::Error, + "Failed to start", + ); + + return Err(e); + } + + self.notify_update(&status_sender, super::RunStatus::Started); + + Ok(()) + } +} diff --git a/linkup-cli/src/services/local_server.rs b/linkup-cli/src/services/local_server.rs index 75f21514..495aadc3 100644 --- a/linkup-cli/src/services/local_server.rs +++ b/linkup-cli/src/services/local_server.rs @@ -7,19 +7,18 @@ use 
std::{ time::Duration, }; +use anyhow::Context; use reqwest::StatusCode; use tokio::time::sleep; use url::Url; use crate::{ - linkup_file_path, + linkup_certs_dir_path, linkup_file_path, local_config::{upload_state, LocalState}, - worker_client, + worker_client, Result, }; -use super::{get_running_pid, stop_pid_file, BackgroundService, Pid, PidError, Signal}; - -pub const LINKUP_LOCAL_SERVER_PORT: u16 = 9066; +use super::{BackgroundService, PidError}; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -36,7 +35,6 @@ pub enum Error { pub struct LocalServer { stdout_file_path: PathBuf, stderr_file_path: PathBuf, - pidfile_path: PathBuf, } impl LocalServer { @@ -44,39 +42,31 @@ impl LocalServer { Self { stdout_file_path: linkup_file_path("localserver-stdout"), stderr_file_path: linkup_file_path("localserver-stderr"), - pidfile_path: linkup_file_path("localserver-pid"), } } + /// For internal communication to local-server, we only use the port 80 (HTTP). pub fn url() -> Url { - Url::parse(&format!("http://localhost:{}", LINKUP_LOCAL_SERVER_PORT)) - .expect("linkup url invalid") + Url::parse("http://localhost:80").expect("linkup url invalid") } - fn start(&self) -> Result<(), Error> { + fn start(&self) -> Result<()> { log::debug!("Starting {}", Self::NAME); let stdout_file = File::create(&self.stdout_file_path)?; let stderr_file = File::create(&self.stderr_file_path)?; - // When running with cargo (e.g. `cargo run -- start`), we should start the server also with cargo. - let mut command = if env::var("CARGO").is_ok() { - let mut cmd = process::Command::new("cargo"); - cmd.args([ - "run", - "--", - "server", - "--pidfile", - self.pidfile_path.to_str().unwrap(), - ]); - - cmd - } else { - let mut cmd = process::Command::new("linkup"); - cmd.args(["server", "--pidfile", self.pidfile_path.to_str().unwrap()]); - - cmd - }; + let mut command = process::Command::new( + env::current_exe().context("Failed to get the current executable")?, + ); + command.env("RUST_LOG", "debug"); + command.env("LINKUP_SERVICE_ID", Self::ID); + command.args([ + "server", + "local-worker", + "--certs-dir", + linkup_certs_dir_path().to_str().unwrap(), + ]); command .process_group(0) @@ -88,16 +78,6 @@ impl LocalServer { Ok(()) } - pub fn stop(&self) { - log::debug!("Stopping {}", Self::NAME); - - stop_pid_file(&self.pidfile_path, Signal::Interrupt); - } - - pub fn running_pid(&self) -> Option { - get_running_pid(&self.pidfile_path) - } - async fn reachable(&self) -> bool { let client = reqwest::Client::builder() .timeout(Duration::from_secs(1)) @@ -110,7 +90,7 @@ impl LocalServer { matches!(response, Ok(res) if res.status() == StatusCode::OK) } - async fn update_state(&self, state: &mut LocalState) -> Result<(), Error> { + async fn update_state(&self, state: &mut LocalState) -> Result<()> { let session_name = upload_state(state).await?; state.linkup.session_name = session_name; @@ -122,14 +102,15 @@ impl LocalServer { } } -impl BackgroundService for LocalServer { +impl BackgroundService for LocalServer { + const ID: &str = "linkup-local-server"; const NAME: &str = "Linkup local server"; async fn run_with_progress( &self, state: &mut LocalState, status_sender: std::sync::mpsc::Sender, - ) -> Result<(), Error> { + ) -> Result<()> { self.notify_update(&status_sender, super::RunStatus::Starting); if self.reachable().await { @@ -176,7 +157,7 @@ impl BackgroundService for LocalServer { "Failed to reach server", ); - return Err(Error::ServerUnreachable); + return Err(Error::ServerUnreachable.into()); } } } diff --git 
a/linkup-cli/src/services/mod.rs b/linkup-cli/src/services/mod.rs index 3a8d98da..fab8f2b7 100644 --- a/linkup-cli/src/services/mod.rs +++ b/linkup-cli/src/services/mod.rs @@ -1,23 +1,21 @@ -use std::fs::{self, File}; -use std::path::Path; use std::{fmt::Display, sync}; -use sysinfo::{get_current_pid, ProcessRefreshKind, RefreshKind, System}; +use sysinfo::{ProcessRefreshKind, RefreshKind, System}; use thiserror::Error; -mod caddy; mod cloudflare_tunnel; -mod dnsmasq; +#[cfg(target_os = "macos")] +mod local_dns_server; mod local_server; +#[cfg(target_os = "macos")] +pub use local_dns_server::LocalDnsServer; pub use local_server::LocalServer; pub use sysinfo::{Pid, Signal}; -pub use {caddy::is_installed as is_caddy_installed, caddy::Caddy}; pub use { cloudflare_tunnel::is_installed as is_cloudflared_installed, cloudflare_tunnel::CloudflareTunnel, }; -pub use {dnsmasq::is_installed as is_dnsmasq_installed, dnsmasq::Dnsmasq}; use crate::local_config::LocalState; @@ -49,14 +47,15 @@ pub struct RunUpdate { pub details: Option, } -pub trait BackgroundService { +pub trait BackgroundService { + const ID: &str; const NAME: &str; async fn run_with_progress( &self, local_state: &mut LocalState, status_sender: sync::mpsc::Sender, - ) -> Result<(), E>; + ) -> anyhow::Result<()>; fn notify_update(&self, status_sender: &sync::mpsc::Sender, status: RunStatus) { status_sender @@ -92,40 +91,25 @@ pub enum PidError { BadPidFile(String), } -fn get_pid(file_path: &Path) -> Result { - if let Err(e) = File::open(file_path) { - return Err(PidError::NoPidFile(e.to_string())); - } - - match fs::read_to_string(file_path) { - Ok(content) => { - let pid_u32 = content - .trim() - .parse::() - .map_err(|e| PidError::BadPidFile(e.to_string()))?; - - Ok(Pid::from_u32(pid_u32)) +pub fn find_service_pid(service_id: &str) -> Option { + for (pid, process) in system().processes() { + if process + .environ() + .iter() + .any(|item| item.to_string_lossy() == format!("LINKUP_SERVICE_ID={service_id}")) + { + return Some(*pid); } - Err(e) => Err(PidError::BadPidFile(e.to_string())), } -} -// Get the pid from a pidfile, but only return Some in case the pidfile is valid and the written pid on the file -// is running. 
-pub fn get_running_pid(file_path: &Path) -> Option { - let pid = match get_pid(file_path) { - Ok(pid) => pid, - Err(_) => return None, - }; - - system().process(pid).map(|_| pid) + None } -pub fn stop_pid_file(pid_file: &Path, signal: Signal) { - if let Some(pid) = get_running_pid(pid_file) { +pub fn stop_service(service_id: &str) { + if let Some(pid) = find_service_pid(service_id) { system() .process(pid) - .map(|process| process.kill_with(signal)); + .map(|process| process.kill_with(Signal::Interrupt)); } } @@ -134,7 +118,3 @@ pub fn system() -> System { RefreshKind::nothing().with_processes(ProcessRefreshKind::everything()), ) } - -pub fn get_current_process_pid() -> Pid { - get_current_pid().unwrap() -} diff --git a/linkup/src/versioning.rs b/linkup/src/versioning.rs index 51f340c6..2e383af6 100644 --- a/linkup/src/versioning.rs +++ b/linkup/src/versioning.rs @@ -6,38 +6,81 @@ pub enum VersionError { Parsing(String), } +#[derive(Debug, PartialEq, Eq)] +pub enum VersionChannel { + Stable, + Beta, +} + +impl Display for VersionChannel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + VersionChannel::Stable => write!(f, "stable"), + VersionChannel::Beta => write!(f, "beta"), + } + } +} + #[derive(Debug, Clone)] pub struct Version { - major: u16, - minor: u16, - patch: u16, + pub major: u16, + pub minor: u16, + pub patch: u16, + pub pre_release: Option, +} + +impl Version { + pub fn channel(&self) -> VersionChannel { + match &self.pre_release { + Some(_) => VersionChannel::Beta, + None => VersionChannel::Stable, + } + } } impl PartialEq for Version { fn eq(&self, other: &Self) -> bool { - self.major == other.major && self.minor == other.minor && self.patch == other.patch + self.major == other.major + && self.minor == other.minor + && self.patch == other.patch + && self.pre_release == other.pre_release } } impl PartialOrd for Version { fn partial_cmp(&self, other: &Self) -> Option { - match ( - self.major.cmp(&other.major), - self.minor.cmp(&other.minor), - self.patch.cmp(&other.patch), - ) { - (std::cmp::Ordering::Equal, std::cmp::Ordering::Equal, ord) => Some(ord), - (std::cmp::Ordering::Equal, ord, _) => Some(ord), - (ord, _, _) => Some(ord), + match (&self.pre_release, &other.pre_release) { + (Some(a), Some(b)) => a.cmp(b).into(), + (Some(_), None) => { + // pre-release is always lower than stable + Some(std::cmp::Ordering::Less) + } + (None, Some(_)) => { + // stable is always higher than pre-release + Some(std::cmp::Ordering::Greater) + } + (None, None) => { + match ( + self.major.cmp(&other.major), + self.minor.cmp(&other.minor), + self.patch.cmp(&other.patch), + ) { + (std::cmp::Ordering::Equal, std::cmp::Ordering::Equal, ord) => Some(ord), + (std::cmp::Ordering::Equal, ord, _) => Some(ord), + (ord, _, _) => Some(ord), + } + } } } } impl Display for Version { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}.{}.{}", self.major, self.minor, self.patch)?; - - Ok(()) + if let Some(pre) = &self.pre_release { + write!(f, "{}.{}.{}-{}", self.major, self.minor, self.patch, pre) + } else { + write!(f, "{}.{}.{}", self.major, self.minor, self.patch) + } } } @@ -45,11 +88,20 @@ impl TryFrom<&str> for Version { type Error = VersionError; fn try_from(value: &str) -> Result { - let (major, minor, patch) = match value.split('.').collect::>()[..] { + let parts: Vec<&str> = value.split('-').collect(); + let version_part = parts[0]; + + let (major, minor, patch) = match version_part.split('.').collect::>()[..] 
{ [major, minor, patch] => (major, minor, patch), _ => return Err(VersionError::Parsing(value.to_string())), }; + let pre_release = if parts.len() > 1 { + Some(parts[1..].join("-")) + } else { + None + }; + Ok(Self { major: major .parse::() @@ -60,6 +112,7 @@ impl TryFrom<&str> for Version { patch: patch .parse::() .map_err(|_| VersionError::Parsing(value.to_string()))?, + pre_release, }) } } @@ -136,14 +189,45 @@ mod tests { assert!(newer_version <= version); } + #[test] + fn test_pre_release_vs_stable() { + let pre_release_version = Version::try_from("0.0.0-next-20250317-abc123").unwrap(); + let stable_version = Version::try_from("1.2.3").unwrap(); + + assert!(stable_version > pre_release_version); + assert!(stable_version >= pre_release_version); + } + + #[test] + fn test_stable_vs_pre_release() { + let stable_version = Version::try_from("1.2.3").unwrap(); + let pre_release_version = Version::try_from("0.0.0-next-20250317-abc123").unwrap(); + + assert!(pre_release_version <= stable_version); + assert!(pre_release_version < stable_version); + } + #[test] fn test_display() { let version = Version { major: 1, minor: 2, patch: 3, + pre_release: None, }; assert_eq!(version.to_string(), "1.2.3"); } + + #[test] + fn test_display_pre_release() { + let version = Version { + major: 1, + minor: 2, + patch: 3, + pre_release: Some("next-20250317-abc123".into()), + }; + + assert_eq!(version.to_string(), "1.2.3-next-20250317-abc123"); + } } diff --git a/local-server/Cargo.toml b/local-server/Cargo.toml index 41b8e375..a2890748 100644 --- a/local-server/Cargo.toml +++ b/local-server/Cargo.toml @@ -9,15 +9,26 @@ path = "src/lib.rs" [dependencies] axum = { version = "0.8.1", features = ["http2", "json"] } +axum-server = { version = "0.7", features = ["tls-rustls"] } http = "1.2.0" -hyper = "1.5.2" -hyper-rustls = "0.27.5" +hickory-server = { version = "0.25.1", features = ["resolver"] } +hyper = { version = "1.5.2", features = ["server"] } +hyper-rustls = { version = "0.27.5", default-features = false, features = [ + "http2", + "ring", +] } hyper-util = { version = "0.1.10", features = ["client-legacy"] } futures = "0.3.31" linkup = { path = "../linkup" } rustls = { version = "0.23.21", default-features = false, features = ["ring"] } rustls-native-certs = "0.8.1" thiserror = "2.0.11" -tokio = { version = "1.43.0", features = ["macros", "signal"] } +tokio = { version = "1.43.0", features = [ + "macros", + "signal", + "rt-multi-thread", +] } tower-http = { version = "0.6.2", features = ["trace"] } tower = "0.5.2" +rcgen = { version = "0.13", features = ["x509-parser"] } +rustls-pemfile = "2.2.0" diff --git a/local-server/src/certificates/mod.rs b/local-server/src/certificates/mod.rs new file mode 100644 index 00000000..df8d34ba --- /dev/null +++ b/local-server/src/certificates/mod.rs @@ -0,0 +1,304 @@ +mod wildcard_sni_resolver; + +use rcgen::{Certificate, CertificateParams, DistinguishedName, DnType, KeyPair}; +use rustls::crypto::ring::sign; +use rustls::pki_types::CertificateDer; +use rustls::sign::CertifiedKey; +use std::{ + env, + fs::{self, File}, + io::BufReader, + path::{Path, PathBuf}, + process, +}; + +pub use wildcard_sni_resolver::WildcardSniResolver; + +const LINKUP_CA_COMMON_NAME: &str = "Linkup Local CA"; + +fn ca_cert_pem_path(certs_dir: &Path) -> PathBuf { + certs_dir.join("linkup_ca.cert.pem") +} + +fn ca_key_pem_path(certs_dir: &Path) -> PathBuf { + certs_dir.join("linkup_ca.key.pem") +} + +#[derive(Debug, thiserror::Error)] +pub enum BuildCertifiedKeyError { + #[error("Failed to read 
file: {0}")] + FileRead(#[from] std::io::Error), + #[error("File does not contain valid certificate")] + InvalidCertFile, + #[error("File does not contain valid private key")] + InvalidKeyFile, +} + +fn build_certified_key( + cert_path: &Path, + key_path: &Path, +) -> Result { + let mut cert_pem = BufReader::new(File::open(cert_path)?); + let mut key_pem = BufReader::new(File::open(key_path)?); + + let certs = rustls_pemfile::certs(&mut cert_pem) + .filter_map(|cert| cert.ok()) + .collect::>>(); + + if certs.is_empty() { + return Err(BuildCertifiedKeyError::InvalidCertFile); + } + + let key_der = rustls_pemfile::private_key(&mut key_pem) + .map_err(|_| BuildCertifiedKeyError::InvalidKeyFile)? + .ok_or(BuildCertifiedKeyError::InvalidCertFile)?; + + let signing_key = + sign::any_supported_type(&key_der).map_err(|_| BuildCertifiedKeyError::InvalidKeyFile)?; + + Ok(CertifiedKey { + cert: certs, + key: signing_key, + ocsp: None, + }) +} + +#[derive(Debug, thiserror::Error)] +pub enum SetupError { + #[error("Failed to create certificates directory '{0}': {1}")] + CreateCertsDir(PathBuf, String), + #[error("Missing NSS installation")] + MissingNSS, +} + +pub fn setup_self_signed_certificates( + certs_dir: &Path, + domains: &[String], +) -> Result<(), SetupError> { + if !certs_dir.exists() { + fs::create_dir_all(certs_dir).map_err(|error| { + SetupError::CreateCertsDir(certs_dir.to_path_buf(), error.to_string()) + })?; + } + + upsert_ca_cert(certs_dir); + add_ca_to_keychain(certs_dir); + + let ff_cert_storages = firefox_profiles_cert_storages(); + if !ff_cert_storages.is_empty() { + if !is_nss_installed() { + println!("It seems like you have Firefox installed."); + println!( + "For self-signed certificates to work with Firefox, you need to have nss installed." 
+ ); + println!("You can find it on https://formulae.brew.sh/formula/nss."); + println!("Please install it and then try to install local-dns again."); + + return Err(SetupError::MissingNSS); + } + + add_ca_to_nss(certs_dir, &ff_cert_storages); + } + + for domain in domains { + create_domain_cert(certs_dir, &format!("*.{}", domain)); + } + + Ok(()) +} + +#[derive(Debug, thiserror::Error)] +pub enum UninstallError { + #[error("Failed to remove certs folder: {0}")] + RemoveCertsFolder(String), + #[error("Failed to remove CA certificate from keychain: {0}")] + DeleteCaCertificate(String), +} + +pub fn uninstall_self_signed_certificates(certs_dir: &Path) -> Result<(), UninstallError> { + if ca_exists_in_keychain() { + remove_ca_from_keychain()?; + } + + match std::fs::remove_dir_all(certs_dir) { + Ok(_) => Ok(()), + Err(error) => match error.kind() { + std::io::ErrorKind::NotFound => Ok(()), + _ => Err(UninstallError::RemoveCertsFolder(error.to_string())), + }, + } +} + +pub fn create_domain_cert(certs_dir: &Path, domain: &str) -> (Certificate, KeyPair) { + let cert_pem_str = fs::read_to_string(ca_cert_pem_path(certs_dir)).unwrap(); + let key_pem_str = fs::read_to_string(ca_key_pem_path(certs_dir)).unwrap(); + + let params = CertificateParams::from_ca_cert_pem(&cert_pem_str).unwrap(); + let ca_key = KeyPair::from_pem(&key_pem_str).unwrap(); + let ca_cert = params.self_signed(&ca_key).unwrap(); + + let mut params = CertificateParams::new(vec![domain.to_string()]).unwrap(); + params.distinguished_name = DistinguishedName::new(); + params.distinguished_name.push(DnType::CommonName, domain); + params.is_ca = rcgen::IsCa::NoCa; + + let key_pair = KeyPair::generate().unwrap(); + let cert = params.signed_by(&key_pair, &ca_cert, &ca_key).unwrap(); + + let escaped_domain = domain.replace("*", "wildcard_"); + let cert_path = certs_dir.join(format!("{}.cert.pem", &escaped_domain)); + let key_path = certs_dir.join(format!("{}.key.pem", &escaped_domain)); + fs::write(cert_path, cert.pem()).unwrap(); + fs::write(key_path, key_pair.serialize_pem()).unwrap(); + + (cert, key_pair) +} + +fn upsert_ca_cert(certs_dir: &Path) { + if ca_cert_pem_path(certs_dir).exists() && ca_key_pem_path(certs_dir).exists() { + return; + } + + let mut params = CertificateParams::new(Vec::new()).unwrap(); + params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained); + params.key_usages = vec![ + rcgen::KeyUsagePurpose::KeyCertSign, + rcgen::KeyUsagePurpose::CrlSign, + ]; + + params + .distinguished_name + .push(rcgen::DnType::CommonName, LINKUP_CA_COMMON_NAME); + + let key_pair = KeyPair::generate().unwrap(); + let cert = params.self_signed(&key_pair).unwrap(); + + fs::write(ca_cert_pem_path(certs_dir), cert.pem()).unwrap(); + fs::write(ca_key_pem_path(certs_dir), key_pair.serialize_pem()).unwrap(); +} + +fn ca_exists_in_keychain() -> bool { + process::Command::new("sudo") + .arg("security") + .arg("find-certificate") + .arg("-c") + .arg(LINKUP_CA_COMMON_NAME) + .stdin(process::Stdio::null()) + .stdout(process::Stdio::null()) + .stderr(process::Stdio::null()) + .status() + .expect("Failed to find linkup CA") + .success() +} + +fn add_ca_to_keychain(certs_dir: &Path) { + process::Command::new("sudo") + .arg("security") + .arg("add-trusted-cert") + .arg("-d") + .arg("-r") + .arg("trustRoot") + .arg("-k") + .arg("/Library/Keychains/System.keychain") + .arg(ca_cert_pem_path(certs_dir)) + .stdin(process::Stdio::null()) + .stdout(process::Stdio::null()) + .stderr(process::Stdio::null()) + .status() + .expect("Failed to add CA 
to keychain"); +} + +fn remove_ca_from_keychain() -> Result<(), UninstallError> { + let status = process::Command::new("sudo") + .arg("security") + .arg("delete-certificate") + .arg("-t") + .arg("-c") + .arg(LINKUP_CA_COMMON_NAME) + .stdin(process::Stdio::null()) + .stdout(process::Stdio::null()) + .stderr(process::Stdio::null()) + .status() + .map_err(|error| UninstallError::DeleteCaCertificate(error.to_string()))?; + + if !status.success() { + return Err(UninstallError::DeleteCaCertificate( + "security command returned unsuccessful exit status".to_string(), + )); + } + + Ok(()) +} + +fn firefox_profiles_cert_storages() -> Vec { + let home = env::var("HOME").expect("Failed to get HOME env var"); + + match fs::read_dir(PathBuf::from(home).join("Library/Application Support/Firefox/Profiles")) { + Ok(dir) => dir + .filter_map(|entry| { + let entry = entry.expect("Failed to read Firefox profile dir entry entry"); + let path = entry.path(); + if path.is_dir() { + if path.join("cert9.db").exists() { + Some(format!("{}{}", "sql:", path.to_str().unwrap())) + } else if path.join("cert8.db").exists() { + Some(format!("{}{}", "dmb:", path.to_str().unwrap())) + } else { + None + } + } else { + None + } + }) + .collect::>(), + Err(error) => { + if !matches!(error.kind(), std::io::ErrorKind::NotFound) { + eprintln!("Failed to load Firefox profiles: {}", error); + } + + Vec::new() + } + } +} + +fn add_ca_to_nss(certs_dir: &Path, cert_storages: &[String]) { + if !is_nss_installed() { + println!("NSS not found, skipping CA installation"); + return; + } + + for cert_storage in cert_storages { + let result = process::Command::new("certutil") + .arg("-A") + .arg("-d") + .arg(cert_storage) + .arg("-t") + .arg("C,,") + .arg("-n") + .arg(LINKUP_CA_COMMON_NAME) + .arg("-i") + .arg(ca_cert_pem_path(certs_dir)) + .status(); + + if let Err(e) = result { + eprintln!("certutil failed to run for profile {}: {}", cert_storage, e); + } + } +} + +fn is_nss_installed() -> bool { + let res = process::Command::new("which") + .args(["certutil"]) + .stdout(process::Stdio::null()) + .stderr(process::Stdio::null()) + .stdin(process::Stdio::null()) + .status(); + + match res { + Ok(status) => status.success(), + Err(e) => { + eprintln!("Failed to check if certutil is installed: {}", e); + false + } + } +} diff --git a/local-server/src/certificates/wildcard_sni_resolver.rs b/local-server/src/certificates/wildcard_sni_resolver.rs new file mode 100644 index 00000000..109a58fc --- /dev/null +++ b/local-server/src/certificates/wildcard_sni_resolver.rs @@ -0,0 +1,98 @@ +use crate::certificates::build_certified_key; +use rustls::server::{ClientHello, ResolvesServerCert}; +use rustls::sign::CertifiedKey; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, RwLock}; + +#[derive(Debug, thiserror::Error)] +pub enum WildcardSniResolverError { + #[error("Failed to read certs directory: {0}")] + ReadDir(#[from] std::io::Error), + + #[error("Failed to get file name")] + FileName, + + #[error("Error building certified key: {0}")] + LoadCert(#[from] crate::certificates::BuildCertifiedKeyError), +} + +#[derive(Debug)] +pub struct WildcardSniResolver { + certs: RwLock>>, +} + +impl WildcardSniResolver { + fn new() -> Self { + Self { + certs: RwLock::new(HashMap::new()), + } + } + + pub fn load_dir(certs_dir: &Path) -> Result { + let resolver = WildcardSniResolver::new(); + + let entries = match fs::read_dir(certs_dir) { + Ok(entries) => entries, + Err(error) => match error.kind() { + 
std::io::ErrorKind::NotFound => return Ok(resolver), + _ => return Err(error.into()), + }, + }; + + for entry in entries.flatten() { + let path = entry.path(); + if let Some(file_name) = path.file_name() { + let file_name = file_name.to_string_lossy(); + + if file_name.contains(".cert.pem") && !path.starts_with("linkup_ca") { + let domain_name = file_name.replace(".cert.pem", "").replace("wildcard_", "*"); + let key_path = + PathBuf::from(path.to_string_lossy().replace(".cert.pem", ".key.pem")); + + if key_path.exists() { + match build_certified_key(&path, &key_path) { + Ok(certified_key) => { + resolver.add_cert(&domain_name, certified_key); + } + Err(e) => { + eprintln!("Error loading cert/key for {domain_name}: {e}"); + } + } + } + } + } + } + + Ok(resolver) + } + + fn add_cert(&self, domain: &str, cert: CertifiedKey) { + let mut certs = self.certs.write().unwrap(); + certs.insert(domain.to_string(), Arc::new(cert)); + } +} + +impl ResolvesServerCert for WildcardSniResolver { + fn resolve(&self, client_hello: ClientHello<'_>) -> Option> { + if let Some(server_name) = client_hello.server_name() { + let certs = self.certs.read().unwrap(); + + if let Some(cert) = certs.get(server_name) { + return Some(cert.clone()); + } + + let parts: Vec<&str> = server_name.split('.').collect(); + + for i in 0..parts.len() { + let wildcard_domain = format!("*.{}", parts[i..].join(".")); + if let Some(cert) = certs.get(&wildcard_domain) { + return Some(cert.clone()); + } + } + } + + None + } +} diff --git a/local-server/src/lib.rs b/local-server/src/lib.rs index f2d97ba4..af483cbd 100644 --- a/local-server/src/lib.rs +++ b/local-server/src/lib.rs @@ -6,24 +6,46 @@ use axum::{ routing::{any, get, post}, Extension, Router, }; +use axum_server::tls_rustls::RustlsConfig; +use hickory_server::{ + authority::{Catalog, ZoneType}, + proto::{ + rr::{Name, RData, Record}, + xfer::Protocol, + }, + resolver::{ + config::{NameServerConfig, NameServerConfigGroup, ResolverOpts}, + name_server::TokioConnectionProvider, + }, + store::{ + forwarder::{ForwardAuthority, ForwardConfig}, + in_memory::InMemoryAuthority, + }, + ServerFuture, +}; use http::{header::HeaderMap, Uri}; use hyper_rustls::HttpsConnector; use hyper_util::{ client::legacy::{connect::HttpConnector, Client}, rt::{TokioExecutor, TokioIo}, }; - use linkup::{ allow_all_cors, get_additional_headers, get_target_service, MemoryStringStore, NameKind, Session, SessionAllocator, TargetService, UpdateSessionRequest, }; -use tokio::signal; +use rustls::ServerConfig; +use std::{ + net::{Ipv4Addr, SocketAddr}, + str::FromStr, +}; +use std::{path::Path, sync::Arc}; +use tokio::{net::UdpSocket, signal}; use tower::ServiceBuilder; use tower_http::trace::{DefaultOnRequest, DefaultOnResponse, TraceLayer}; -type HttpsClient = Client, Body>; +pub mod certificates; -const LINKUP_LOCALSERVER_PORT: u16 = 9066; +type HttpsClient = Client, Body>; #[derive(Debug)] struct ApiError { @@ -50,8 +72,7 @@ impl IntoResponse for ApiError { } } -pub fn linkup_router() -> Router { - let config_store = MemoryStringStore::default(); +pub fn linkup_router(config_store: MemoryStringStore) -> Router { let client = https_client(); Router::new() @@ -71,13 +92,43 @@ pub fn linkup_router() -> Router { ) } -pub async fn start_server() -> std::io::Result<()> { - let app = linkup_router(); +pub async fn start_server_https(config_store: MemoryStringStore, certs_dir: &Path) { + let _ = rustls::crypto::ring::default_provider().install_default(); + + let sni = match 
certificates::WildcardSniResolver::load_dir(certs_dir) { + Ok(sni) => sni, + Err(error) => { + eprintln!( + "Failed to load certificates from {:?} into SNI: {}", + certs_dir, error + ); + return; + } + }; + + let mut server_config = ServerConfig::builder() + .with_no_client_auth() + .with_cert_resolver(Arc::new(sni)); + server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()]; - let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", LINKUP_LOCALSERVER_PORT)) + let app = linkup_router(config_store); + + let addr = SocketAddr::from(([0, 0, 0, 0], 443)); + println!("listening on {}", &addr); + + axum_server::bind_rustls(addr, RustlsConfig::from_config(Arc::new(server_config))) + .serve(app.into_make_service()) .await - .unwrap(); - println!("listening on {}", listener.local_addr().unwrap()); + .expect("failed to start HTTPS server"); +} + +pub async fn start_server_http(config_store: MemoryStringStore) -> std::io::Result<()> { + let app = linkup_router(config_store); + + let addr = SocketAddr::from(([0, 0, 0, 0], 80)); + println!("listening on {}", &addr); + + let listener = tokio::net::TcpListener::bind(addr).await?; axum::serve(listener, app) .with_graceful_shutdown(shutdown_signal()) .await?; @@ -85,9 +136,47 @@ pub async fn start_server() -> std::io::Result<()> { Ok(()) } -#[tokio::main] -pub async fn local_linkup_main() -> std::io::Result<()> { - start_server().await +pub async fn start_dns_server(linkup_session_name: String, domains: Vec) { + let mut catalog = Catalog::new(); + + for domain in &domains { + let record_name = Name::from_str(&format!("{linkup_session_name}.{domain}.")).unwrap(); + + let authority = InMemoryAuthority::empty(record_name.clone(), ZoneType::Primary, false); + + let record = Record::from_rdata( + record_name.clone(), + 3600, + RData::A(Ipv4Addr::new(127, 0, 0, 1).into()), + ); + + authority.upsert(record, 0).await; + + catalog.upsert(record_name.clone().into(), vec![Arc::new(authority)]); + } + + let cf_name_server = NameServerConfig::new("1.1.1.1:53".parse().unwrap(), Protocol::Udp); + let forward_config = ForwardConfig { + name_servers: NameServerConfigGroup::from(vec![cf_name_server]), + options: Some(ResolverOpts::default()), + }; + + let forwarder = + ForwardAuthority::builder_with_config(forward_config, TokioConnectionProvider::default()) + .with_origin(Name::root()) + .build() + .unwrap(); + + catalog.upsert(Name::root().into(), vec![Arc::new(forwarder)]); + + let addr = SocketAddr::from(([0, 0, 0, 0], 8053)); + let sock = UdpSocket::bind(&addr).await.unwrap(); + + let mut server = ServerFuture::new(catalog); + server.register_socket(sock); + + println!("listening on {addr}"); + server.block_until_done().await.unwrap(); } async fn linkup_request_handler( @@ -98,7 +187,19 @@ async fn linkup_request_handler( let sessions = SessionAllocator::new(&store); let headers: linkup::HeaderMap = req.headers().into(); - let url = format!("http://localhost:{}{}", LINKUP_LOCALSERVER_PORT, req.uri()); + let url = if req.uri().scheme().is_some() { + req.uri().to_string() + } else { + format!( + "http://{}{}", + req.headers() + .get(http::header::HOST) + .and_then(|h| h.to_str().ok()) + .unwrap_or("localhost"), + req.uri() + ) + }; + let (session_name, config) = match sessions.get_request_session(&url, &headers).await { Ok(session) => session, Err(_) => { @@ -106,7 +207,7 @@ async fn linkup_request_handler( "Linkup was unable to determine the session origin of the request. 
Ensure that your request includes a valid session identifier in the referer or tracestate headers. - Local Server".to_string(), StatusCode::UNPROCESSABLE_ENTITY, ) - .into_response() + .into_response() } }; @@ -117,7 +218,7 @@ async fn linkup_request_handler( "The request belonged to a session, but there was no target for the request. Check that the routing rules in your linkup config have a match for this request. - Local Server".to_string(), StatusCode::NOT_FOUND, ) - .into_response() + .into_response() } }; @@ -141,12 +242,16 @@ async fn handle_http_req( extra_headers: linkup::HeaderMap, client: HttpsClient, ) -> Response { - *req.uri_mut() = Uri::try_from(target_service.url).unwrap(); + *req.uri_mut() = Uri::try_from(&target_service.url).unwrap(); let extra_http_headers: HeaderMap = extra_headers.into(); req.headers_mut().extend(extra_http_headers); // Request uri and host headers should not conflict req.headers_mut().remove(http::header::HOST); + if target_service.url.starts_with("http://") { + *req.version_mut() = http::Version::HTTP_11; + } + // Send the modified request to the target service. let mut resp = match client.request(req).await { Ok(resp) => resp, @@ -340,6 +445,7 @@ fn https_client() -> HttpsClient { .with_tls_config(tls) .https_or_http() .enable_http1() + .enable_http2() .build(); Client::builder(TokioExecutor::new()).build(https) diff --git a/server-tests/tests/helpers.rs b/server-tests/tests/helpers.rs index 6a498490..b44d40a0 100644 --- a/server-tests/tests/helpers.rs +++ b/server-tests/tests/helpers.rs @@ -1,6 +1,6 @@ use std::process::Command; -use linkup::{StorableDomain, StorableService, UpdateSessionRequest}; +use linkup::{MemoryStringStore, StorableDomain, StorableService, UpdateSessionRequest}; use linkup_local_server::linkup_router; use reqwest::Url; use tokio::net::TcpListener; @@ -14,7 +14,7 @@ pub enum ServerKind { pub async fn setup_server(kind: ServerKind) -> String { match kind { ServerKind::Local => { - let app = linkup_router(); + let app = linkup_router(MemoryStringStore::default()); // Bind to a random port assigned by the OS let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); diff --git a/worker/src/lib.rs b/worker/src/lib.rs index f140cd5a..b59edfdb 100644 --- a/worker/src/lib.rs +++ b/worker/src/lib.rs @@ -11,7 +11,7 @@ use http_error::HttpError; use kv_store::CfWorkerStringStore; use linkup::{ allow_all_cors, get_additional_headers, get_target_service, CreatePreviewRequest, NameKind, - Session, SessionAllocator, UpdateSessionRequest, Version, + Session, SessionAllocator, UpdateSessionRequest, Version, VersionChannel, }; use serde::{Deserialize, Serialize}; use tower_service::Service; @@ -302,7 +302,7 @@ async fn linkup_request_handler( Ok(session) => session, Err(_) => { return HttpError::new( - "Linkup was unable to determine the session origin of the request. + "Linkup was unable to determine the session origin of the request. Make sure your request includes a valid session ID in the referer or tracestate headers. - Local Server".to_string(), StatusCode::UNPROCESSABLE_ENTITY, ) @@ -314,7 +314,7 @@ async fn linkup_request_handler( Some(result) => result, None => { return HttpError::new( - "The request belonged to a session, but there was no target for the request. + "The request belonged to a session, but there was no target for the request. Check your routing rules in the linkup config for a match. 
- Local Server" .to_string(), StatusCode::NOT_FOUND, @@ -541,7 +541,9 @@ async fn authenticate( match headers.get("x-linkup-version") { Some(value) => match Version::try_from(value.to_str().unwrap()) { Ok(client_version) => { - if client_version < state.min_supported_client_version { + if client_version < state.min_supported_client_version + && client_version.channel() != VersionChannel::Beta + { return ( StatusCode::UNAUTHORIZED, "Your Linkup CLI is outdated, please upgrade to the latest version.",