diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index e0edce2..12a4a85 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,10 +1,12 @@
+# This file was autogenerated by dist: https://opensource.axo.dev/cargo-dist/
+#
 # Copyright 2022-2024, axodotdev
 # SPDX-License-Identifier: MIT or Apache-2.0
 #
 # CI that:
 #
 # * checks for a Git Tag that looks like a release
-# * builds artifacts with cargo-dist (archives, installers, hashes)
+# * builds artifacts with dist (archives, installers, hashes)
 # * uploads those artifacts to temporary workflow zip
 # * on success, uploads the artifacts to a GitHub Release
 #
@@ -22,10 +24,10 @@ permissions:
 # must be a Cargo-style SemVer Version (must have at least major.minor.patch).
 #
 # If PACKAGE_NAME is specified, then the announcement will be for that
-# package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
+# package (erroring out if it doesn't have the given version or isn't dist-able).
 #
 # If PACKAGE_NAME isn't specified, then the announcement will be for all
-# (cargo-dist-able) packages in the workspace with that version (this mode is
+# (dist-able) packages in the workspace with that version (this mode is
 # intended for workspaces with only one dist-able package, or with all dist-able
 # packages versioned/released in lockstep).
 #
@@ -43,9 +45,9 @@ on:
     - '**[0-9]+.[0-9]+.[0-9]+*'

 jobs:
-  # Run 'cargo dist plan' (or host) to determine what tasks we need to do
+  # Run 'dist plan' (or host) to determine what tasks we need to do
   plan:
-    runs-on: "ubuntu-20.04"
+    runs-on: "ubuntu-latest"
     outputs:
       val: ${{ steps.plan.outputs.manifest }}
       tag: ${{ !github.event.pull_request && github.ref_name || '' }}
@@ -57,16 +59,16 @@ jobs:
       - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Install cargo-dist
+      - name: Install dist
         # we specify bash to get pipefail; it guards against the `curl` command
         # failing. otherwise `sh` won't catch that `curl` returned non-0
         shell: bash
-        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.18.0/cargo-dist-installer.sh | sh"
-      - name: Cache cargo-dist
+        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.28.0/cargo-dist-installer.sh | sh"
+      - name: Cache dist
         uses: actions/upload-artifact@v4
         with:
           name: cargo-dist-cache
-          path: ~/.cargo/bin/cargo-dist
+          path: ~/.cargo/bin/dist
       # sure would be cool if github gave us proper conditionals...
       # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
       # functionality based on whether this is a pull_request, and whether it's from a fork.
@@ -74,8 +76,8 @@
       # but also really annoying to build CI around when it needs secrets to work right.)
       - id: plan
         run: |
-          cargo dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --output-format=json > plan-dist-manifest.json
-          echo "cargo dist ran successfully"
+          dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --allow-dirty --output-format=json > plan-dist-manifest.json
+          echo "dist ran successfully"
           cat plan-dist-manifest.json
           echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
       - name: "Upload dist-manifest.json"
@@ -93,18 +95,19 @@ jobs:
     if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }}
     strategy:
       fail-fast: false
-      # Target platforms/runners are computed by cargo-dist in create-release.
+      # Target platforms/runners are computed by dist in create-release.
       # Each member of the matrix has the following arguments:
       #
       # - runner: the github runner
-      # - dist-args: cli flags to pass to cargo dist
-      # - install-dist: expression to run to install cargo-dist on the runner
+      # - dist-args: cli flags to pass to dist
+      # - install-dist: expression to run to install dist on the runner
       #
       # Typically there will be:
       # - 1 "global" task that builds universal installers
      # - N "local" tasks that build each platform's binaries and platform-specific installers
       matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }}
     runs-on: ${{ matrix.runner }}
+    container: ${{ matrix.container && matrix.container.image || null }}
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json
@@ -115,8 +118,15 @@ jobs:
       - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Install cargo-dist
-        run: ${{ matrix.install_dist }}
+      - name: Install Rust non-interactively if not already installed
+        if: ${{ matrix.container }}
+        run: |
+          if ! command -v cargo > /dev/null 2>&1; then
+            curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+            echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+          fi
+      - name: Install dist
+        run: ${{ matrix.install_dist.run }}
       # Get the dist-manifest
       - name: Fetch local artifacts
         uses: actions/download-artifact@v4
@@ -130,8 +140,8 @@
       - name: Build artifacts
         run: |
           # Actually do builds and make zips and whatnot
-          cargo dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json
-          echo "cargo dist ran successfully"
+          dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json
+          echo "dist ran successfully"
       - id: cargo-dist
         name: Post-build
         # We force bash here just because github makes it really hard to get values up
@@ -141,7 +151,7 @@
         run: |
           # Parse out what we just built and upload it to scratch storage
           echo "paths<<EOF" >> "$GITHUB_OUTPUT"
-          jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
+          dist print-upload-files-from-manifest --manifest dist-manifest.json >> "$GITHUB_OUTPUT"
           echo "EOF" >> "$GITHUB_OUTPUT"

           cp dist-manifest.json "$BUILD_MANIFEST_NAME"
@@ -158,7 +168,7 @@ jobs:
     needs:
       - plan
       - build-local-artifacts
-    runs-on: "ubuntu-20.04"
+    runs-on: "ubuntu-latest"
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
@@ -166,12 +176,12 @@
       - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Install cached cargo-dist
+      - name: Install cached dist
         uses: actions/download-artifact@v4
         with:
           name: cargo-dist-cache
           path: ~/.cargo/bin/
-      - run: chmod +x ~/.cargo/bin/cargo-dist
+      - run: chmod +x ~/.cargo/bin/dist
       # Get all the local artifacts for the global tasks to use (for e.g. checksums)
       - name: Fetch local artifacts
         uses: actions/download-artifact@v4
@@ -182,8 +192,8 @@
       - id: cargo-dist
         shell: bash
         run: |
-          cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
-          echo "cargo dist ran successfully"
+          dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
+          echo "dist ran successfully"

           # Parse out what we just built and upload it to scratch storage
           echo "paths<<EOF" >> "$GITHUB_OUTPUT"
@@ -208,19 +218,19 @@ jobs:
     if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }}
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-    runs-on: "ubuntu-20.04"
+    runs-on: "ubuntu-latest"
     outputs:
       val: ${{ steps.host.outputs.manifest }}
     steps:
       - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Install cached cargo-dist
+      - name: Install cached dist
         uses: actions/download-artifact@v4
         with:
           name: cargo-dist-cache
           path: ~/.cargo/bin/
-      - run: chmod +x ~/.cargo/bin/cargo-dist
+      - run: chmod +x ~/.cargo/bin/dist
       # Fetch artifacts from scratch-storage
       - name: Fetch artifacts
         uses: actions/download-artifact@v4
@@ -231,7 +241,7 @@
       - id: host
         shell: bash
         run: |
-          cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
+          dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
           echo "artifacts uploaded and released successfully"
           cat dist-manifest.json
           echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
@@ -268,7 +278,7 @@ jobs:
     needs:
       - plan
       - host
-    runs-on: "ubuntu-20.04"
+    runs-on: "ubuntu-latest"
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       PLAN: ${{ needs.plan.outputs.val }}
@@ -299,6 +309,11 @@ jobs:
             name=$(echo "$filename" | sed "s/\.rb$//")
             version=$(echo "$release" | jq .app_version --raw-output)

+            export PATH="/home/linuxbrew/.linuxbrew/bin:$PATH"
+            brew update
+            # We avoid reformatting user-provided data such as the app description and homepage.
+            brew style --except-cops FormulaAudit/Homepage,FormulaAudit/Desc,FormulaAuditStrict --fix "Formula/${filename}" || true
+
             git add "Formula/${filename}"
             git commit -m "${name} ${version}"
           done
@@ -313,7 +328,7 @@ jobs:
     # still allowing individual publish jobs to skip themselves (for prereleases).
     # "host" however must run to completion, no skipping allowed!
     if: ${{ always() && needs.host.result == 'success' && (needs.publish-homebrew-formula.result == 'skipped' || needs.publish-homebrew-formula.result == 'success') }}
-    runs-on: "ubuntu-20.04"
+    runs-on: "ubuntu-latest"
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
diff --git a/Cargo.toml b/Cargo.toml
index f11a236..809b1b6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,7 +14,7 @@ members = [
 ]

 [workspace.package]
-version = "0.6.0"
+version = "0.7.0"
 authors = ["shellrow "]

 [workspace.dependencies]
@@ -26,26 +26,4 @@ rangemap = "1.5"
 [profile.dist]
 inherits = "release"
 lto = "thin"
-
-# Config for 'cargo dist'
-[workspace.metadata.dist]
-# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
-cargo-dist-version = "0.18.0"
-# CI backends to support
-ci = "github"
-# The installers to generate for each app
-#installers = ["shell", "homebrew", "powershell"]
-installers = ["shell", "homebrew"]
-# A GitHub repo to push Homebrew formulas to
-tap = "shellrow/homebrew-tap-ntap"
-# Target platforms to build apps for (Rust target-triple syntax)
-#targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"]
-targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu"]
-# Publish jobs to run in CI
-publish-jobs = ["homebrew"]
-# Publish jobs to run in CI
-pr-run-mode = "plan"
-# Whether to install an updater program
-install-updater = false
-include = ["resources/doc/USAGE.md"]
-#features = ["bundle"]
+allow-dirty = true
diff --git a/README.md b/README.md
index 793b7b0..c97ca4e 100644
--- a/README.md
+++ b/README.md
@@ -134,10 +134,3 @@ sudo chmod-bpf install
 ### License

 `ntap` is released under the MIT License. See the LICENSE file for more details.
-
-## Screenshots
-![image](resources/ss/ntap-ss-stat.png)
-![image](resources/ss/ntap-ss-monitor.png)
-![image](resources/ss/ntap-ss-remote-hosts.png)
-![image](resources/ss/ntap-ss-connections.png)
-![image](resources/ss/ntap-ss-live.png)
diff --git a/dist-workspace.toml b/dist-workspace.toml
new file mode 100644
index 0000000..0d12070
--- /dev/null
+++ b/dist-workspace.toml
@@ -0,0 +1,31 @@
+[workspace]
+members = ["cargo:."]
+
+# Config for 'dist'
+[dist]
+# The preferred dist version to use in CI (Cargo.toml SemVer syntax)
+cargo-dist-version = "0.28.0"
+# CI backends to support
+ci = "github"
+# The installers to generate for each app
+installers = ["shell", "homebrew"]
+#installers = ["shell", "homebrew", "powershell"]
+# A GitHub repo to push Homebrew formulas to
+tap = "shellrow/homebrew-tap-ntap"
+# Target platforms to build apps for (Rust target-triple syntax)
+targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu"]
+#targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"]
+# Publish jobs to run in CI
+publish-jobs = ["homebrew"]
+# Which actions to run on pull requests
+pr-run-mode = "plan"
+# Whether to install an updater program
+install-updater = false
+# Extra static files to include in each App (path relative to this Cargo.toml's dir)
+include = ["resources/doc/USAGE.md"]
+#features = ["bundle"]
+# Path that installers should place binaries in
+install-path = "CARGO_HOME"
+
+[dist.github-custom-runners]
+runner = "ubuntu-latest"
diff --git a/ntap-db/as/resources/as.bin b/ntap-db/as/resources/as.bin
index 452442c..0f2c422 100644
Binary files a/ntap-db/as/resources/as.bin and b/ntap-db/as/resources/as.bin differ
diff --git a/ntap-db/ipv4-asn/resources/ipv4-asn.bin b/ntap-db/ipv4-asn/resources/ipv4-asn.bin
index 2847f4c..87923cd 100644
Binary files a/ntap-db/ipv4-asn/resources/ipv4-asn.bin and b/ntap-db/ipv4-asn/resources/ipv4-asn.bin differ
diff --git a/ntap-db/ipv4-country/resources/ipv4-country.bin b/ntap-db/ipv4-country/resources/ipv4-country.bin
index 2644505..8b1b6ad 100644
Binary files a/ntap-db/ipv4-country/resources/ipv4-country.bin and b/ntap-db/ipv4-country/resources/ipv4-country.bin differ
diff --git a/ntap-db/ipv6-asn/resources/ipv6-asn.bin b/ntap-db/ipv6-asn/resources/ipv6-asn.bin
index 1a5c91f..1785ef1 100644
Binary files a/ntap-db/ipv6-asn/resources/ipv6-asn.bin and b/ntap-db/ipv6-asn/resources/ipv6-asn.bin differ
diff --git a/ntap-db/ipv6-country/resources/ipv6-country.bin b/ntap-db/ipv6-country/resources/ipv6-country.bin
index 75c3e38..7e94177 100644
Binary files a/ntap-db/ipv6-country/resources/ipv6-country.bin and b/ntap-db/ipv6-country/resources/ipv6-country.bin differ
diff --git a/ntap/Cargo.toml b/ntap/Cargo.toml
index 567a54e..35066fc 100644
--- a/ntap/Cargo.toml
+++ b/ntap/Cargo.toml
@@ -19,8 +19,8 @@ bincode = { workspace = true }
 rangemap = { workspace = true }
 log = "0.4"
 simplelog = "0.12"
-netdev = { version = "0.30", features = ["serde"] }
-nex = { version = "0.18", features = ["serde"] }
+netdev = { version = "0.34", features = ["serde"] }
+nex = { version = "0.19", features = ["serde"] }
 tokio = { version = "1.38" }
 clap = { version = "4.5", features = ["cargo"] }
 crossterm = "0.27"
@@ -32,31 +32,28 @@ ratatui = "0.25"
 comfy-table = "7.1"
 hickory-resolver = { version = "0.24" }
 futures = {version = "0.3"}
-netsock = { version = "0.2", features = ["serde"] }
+netsock = { version = "0.3", features = ["serde"] }
 reqwest = { version="0.12", default-features = false, features = ["json", "rustls-tls", "stream"] }
 chrono = { version = "0.4", features = ["serde"] }
 time = { version = "0.3", features = ["local-offset"] }
-ipnet = "2.5"
+ipnet = "2.11"
 ipstruct = "0.2"
 home = "0.5"
 termtree = "0.5"
 indicatif = "0.16"
 inquire = "0.6"
-ntap-db-as = { path = "../ntap-db/as", version = "0.6.0", default-features = false}
-ntap-db-country = { path = "../ntap-db/country", version = "0.6.0", default-features = false }
-ntap-db-ipv4-asn = { path = "../ntap-db/ipv4-asn", version = "0.6.0", default-features = false }
-ntap-db-ipv4-country = { path = "../ntap-db/ipv4-country", version = "0.6.0", default-features = false }
-ntap-db-ipv6-asn = { path = "../ntap-db/ipv6-asn", version = "0.6.0", default-features = false }
-ntap-db-ipv6-country = { path = "../ntap-db/ipv6-country", version = "0.6.0", default-features = false }
-ntap-db-oui = { path = "../ntap-db/oui", version = "0.6.0", default-features = false }
-ntap-db-tcp-service = { path = "../ntap-db/tcp-service", version = "0.6.0", default-features = false }
-ntap-db-udp-service = { path = "../ntap-db/udp-service", version = "0.6.0", default-features = false }
+ntap-db-as = { path = "../ntap-db/as", version = "0.7.0", default-features = false}
+ntap-db-country = { path = "../ntap-db/country", version = "0.7.0", default-features = false }
+ntap-db-ipv4-asn = { path = "../ntap-db/ipv4-asn", version = "0.7.0", default-features = false }
+ntap-db-ipv4-country = { path = "../ntap-db/ipv4-country", version = "0.7.0", default-features = false }
+ntap-db-ipv6-asn = { path = "../ntap-db/ipv6-asn", version = "0.7.0", default-features = false }
+ntap-db-ipv6-country = { path = "../ntap-db/ipv6-country", version = "0.7.0", default-features = false }
+ntap-db-oui = { path = "../ntap-db/oui", version = "0.7.0", default-features = false }
+ntap-db-tcp-service = { path = "../ntap-db/tcp-service", version = "0.7.0", default-features = false }
+ntap-db-udp-service = { path = "../ntap-db/udp-service", version = "0.7.0", default-features = false }

 [target.'cfg(windows)'.dependencies]
 winreg = "0.50"
-sha2 = "0.10"
-zip = "0.6"
-privilege = "0.3"

 [features]
 default = []
diff --git a/ntap/src/deps/windows.rs b/ntap/src/deps/windows.rs
index 2f17537..273c9eb 100644
--- a/ntap/src/deps/windows.rs
+++ b/ntap/src/deps/windows.rs
@@ -1,7 +1,5 @@
 use super::DepsError;
 use crate::sys;
-use privilege::runas::Command as RunasCommand;
-use sha2::{Digest, Sha256};
 use std::collections::HashMap;
 use std::error::Error;
 use std::fs::File;
@@ -62,139 +60,3 @@ pub fn npcap_sdk_installed() -> bool {
     }
     false
 }
-
-/// Download npcap installer
-pub fn download_npcap(dst_dir_path: String) -> Result<(), Box<dyn Error>> {
-    let npcap_installer_url = format!("{}{}", NPCAP_DIST_BASE_URL, NPCAP_INSTALLER_FILENAME);
-    // Check and create download dir
-    let dir_path = std::path::Path::new(&dst_dir_path);
-    if !dir_path.exists() {
-        std::fs::create_dir_all(dir_path)?;
-    }
-    let npcap_target_path: std::path::PathBuf = dir_path.join(NPCAP_INSTALLER_FILENAME);
-    // Download npcap installer if not exists
-    if !std::path::Path::new(&npcap_target_path).exists() {
-        let mut response: reqwest::blocking::Response =
-            reqwest::blocking::get(&npcap_installer_url)?;
-        let mut file: File = File::create(&npcap_target_path)?;
-        response.copy_to(&mut file)?;
-    }
-    Ok(())
-}
-
-/// Verify npcap installer SHA256 checksum
-pub fn verify_installer_checksum(file_path: &PathBuf) -> Result<(), Box<dyn Error>> {
-    let mut file: File = File::open(&file_path)?;
-    let mut hasher = Sha256::new();
-    std::io::copy(&mut file, &mut hasher)?;
-    let hash_result = hasher.finalize();
-    let hash_result: String = format!("{:X}", hash_result);
-
-    if hash_result != NPCAP_INSTALLER_HASH {
-        return Err(format!("Error: checksum failed... {}", hash_result).into());
-    }
-    Ok(())
-}
-
-/// Run npcap installer.
-///
-/// Warning: This function will run npcap installer with admin privileges.
-///
-/// This function only run verified npcap installer.
-pub fn run_npcap_installer(file_path: &PathBuf) -> Result<(), Box<dyn Error>> {
-    // Check file exists
-    if !std::path::Path::new(&file_path).exists() {
-        return Err("Error: file not found...".into());
-    }
-    // Verify checksum
-    verify_installer_checksum(file_path)?;
-    let exit_status: std::process::ExitStatus = RunasCommand::new(&file_path)
-        .arg("/loopback_support=yes")
-        .arg("/winpcap_mode=yes")
-        .run()?;
-    if !exit_status.success() {
-        return Err("Error: Npcap installation failed !".into());
-    }
-    Ok(())
-}
-
-/// Download npcap SDK
-pub fn download_npcap_sdk(dst_dir_path: String) -> Result<(), Box<dyn Error>> {
-    let npcap_sdk_url = format!("{}{}", NPCAP_DIST_BASE_URL, NPCAP_SDK_FILENAME);
-    // Check and create download dir
-    let dir_path = std::path::Path::new(&dst_dir_path);
-    if !dir_path.exists() {
-        std::fs::create_dir_all(dir_path)?;
-    }
-    let npcap_sdk_target_path: std::path::PathBuf = dir_path.join(NPCAP_SDK_FILENAME);
-    // Download npcap sdk if not exists
-    if !std::path::Path::new(&npcap_sdk_target_path).exists() {
-        let mut response: reqwest::blocking::Response = reqwest::blocking::get(&npcap_sdk_url)?;
-        let mut file: File = File::create(&npcap_sdk_target_path)?;
-        response.copy_to(&mut file)?;
-    }
-    Ok(())
-}
-
-/// Verify npcap SDK SHA256 checksum
-pub fn verify_sdk_checksum(file_path: &PathBuf) -> Result<(), Box<dyn Error>> {
-    let mut file: File = File::open(&file_path)?;
-    let mut hasher = Sha256::new();
-    std::io::copy(&mut file, &mut hasher)?;
-    let hash_result = hasher.finalize();
-    let hash_result: String = format!("{:X}", hash_result);
-
-    if hash_result != NPCAP_SDK_HASH {
-        return Err("Error: checksum failed...".into());
-    }
-    Ok(())
-}
-
-/// Extract npcap SDK
-pub fn extract_npcap_sdk(file_path: &PathBuf) -> Result<PathBuf, Box<dyn Error>> {
-    // Check file exists
-    if !std::path::Path::new(&file_path).exists() {
-        return Err("Error: file not found...".into());
-    }
-    // Verify checksum
-    verify_sdk_checksum(file_path)?;
-    // Extract npcap SDK
-    let npcap_sdk_extract_dir: String = format!(
-        "{}\\{}",
-        sys::get_install_path(NPCAP_INSTALL_DIR_NAME),
-        NPCAP_SDK_DIR_NAME
-    );
-    let npcap_sdk_extract_dir = std::path::PathBuf::from(npcap_sdk_extract_dir);
-    let mut archive: zip::ZipArchive<File> = zip::ZipArchive::new(File::open(&file_path)?)?;
-    for i in 0..archive.len() {
-        let mut file: zip::read::ZipFile = archive.by_index(i)?;
-        let outpath: std::path::PathBuf = npcap_sdk_extract_dir.join(file.name());
-        if (&*file.name()).ends_with('/') {
-            std::fs::create_dir_all(&outpath)?;
-        } else {
-            if let Some(p) = outpath.parent() {
-                if !p.exists() {
-                    std::fs::create_dir_all(&p)?;
-                }
-            }
-            let mut outfile: File = std::fs::File::create(&outpath)?;
-            std::io::copy(&mut file, &mut outfile)?;
-        }
-    }
-    Ok(npcap_sdk_extract_dir)
-}
-
-/// Add npcap SDK to LIB env var
-pub fn add_npcap_sdk_to_lib(lib_dir_path: PathBuf) -> Result<(), Box<dyn Error>> {
-    // Check lib dir exists
-    if !std::path::Path::new(&lib_dir_path).exists() {
-        return Err("Error: lib dir not found...".into());
-    }
-    if !sys::check_env_lib_path(&lib_dir_path.to_str().unwrap()) {
-        match sys::add_env_lib_path(&lib_dir_path.to_str().unwrap()) {
-            Ok(_) => {}
-            Err(e) => Err(e)?,
-        }
-    }
-    Ok(())
-}
diff --git a/ntap/src/handler/interface.rs b/ntap/src/handler/interface.rs
index 59828a7..092e877 100644
--- a/ntap/src/handler/interface.rs
+++ b/ntap/src/handler/interface.rs
@@ -26,13 +26,13 @@ pub fn show_interfaces() -> Result<(), Box> {
         ));
         let mut ipv4_tree = Tree::new(node_label("IPv4 Addresses", None, None));
         for ipv4 in &iface.ipv4 {
-            ipv4_tree.push(node_label(&ipv4.addr.to_string(), None, None));
+            ipv4_tree.push(node_label(&ipv4.addr().to_string(), None, None));
         }
         iface_tree.push(ipv4_tree);

         let mut ipv6_tree = Tree::new(node_label("IPv6 Addresses", None, None));
         for ipv6 in &iface.ipv6 {
-            ipv6_tree.push(node_label(&ipv6.addr.to_string(), None, None));
+            ipv6_tree.push(node_label(&ipv6.addr().to_string(), None, None));
         }
         iface_tree.push(ipv6_tree);

@@ -84,13 +84,13 @@ pub fn show_default_interface() -> Result<(), Box> {
     ));
     let mut ipv4_tree = Tree::new(node_label("IPv4 Addresses", None, None));
     for ipv4 in &iface.ipv4 {
-        ipv4_tree.push(node_label(&ipv4.addr.to_string(), None, None));
+        ipv4_tree.push(node_label(&ipv4.addr().to_string(), None, None));
     }
     tree.push(ipv4_tree);

     let mut ipv6_tree = Tree::new(node_label("IPv6 Addresses", None, None));
     for ipv6 in &iface.ipv6 {
-        ipv6_tree.push(node_label(&ipv6.addr.to_string(), None, None));
+        ipv6_tree.push(node_label(&ipv6.addr().to_string(), None, None));
     }
     tree.push(ipv6_tree);

diff --git a/ntap/src/handler/route.rs b/ntap/src/handler/route.rs
index 0e4058a..8db68ae 100644
--- a/ntap/src/handler/route.rs
+++ b/ntap/src/handler/route.rs
@@ -35,7 +35,7 @@ pub fn show_routes() -> Result<(), Box> {
                 if iface.default {
                     table.add_row(vec![
                         Cell::new(format!("{} (default)", &iface.name)),
-                        Cell::new(&ipv4.addr),
+                        Cell::new(&ipv4.addr()),
                         Cell::new(Ipv4Addr::UNSPECIFIED),
                         Cell::new(&ipv4.netmask()),
                         Cell::new(ipv4_gateway),
@@ -43,7 +43,7 @@ pub fn show_routes() -> Result<(), Box> {
                 } else {
                     table.add_row(vec![
                         Cell::new(&iface.name),
-                        Cell::new(&ipv4.addr),
+                        Cell::new(&ipv4.addr()),
                         Cell::new(&ipv4.network()),
                         Cell::new(&ipv4.netmask()),
                         Cell::new(ipv4_gateway),
@@ -51,11 +51,11 @@ pub fn show_routes() -> Result<(), Box> {
                 }
             }
         } else {
-            if iface.if_type == InterfaceType::Loopback || iface.ipv4[0].addr == Ipv4Addr::LOCALHOST
+            if iface.if_type == InterfaceType::Loopback || iface.ipv4[0].addr() == Ipv4Addr::LOCALHOST
             {
                 table.add_row(vec![
                     Cell::new(iface.name),
-                    Cell::new(&iface.ipv4[0].addr),
+                    Cell::new(&iface.ipv4[0].addr()),
                     Cell::new(&iface.ipv4[0].network()),
                     Cell::new(&iface.ipv4[0].netmask()),
                     Cell::new(""),
@@ -92,7 +92,7 @@ pub fn show_routes() -> Result<(), Box> {
                 if iface.default {
                     table.add_row(vec![
                         Cell::new(format!("{} (default)", &iface.name)),
-                        Cell::new(&ipv6.addr),
+                        Cell::new(&ipv6.addr()),
                         Cell::new(Ipv6Addr::UNSPECIFIED),
                         Cell::new(&ipv6.netmask()),
                         Cell::new(ipv6_gateway),
@@ -100,7 +100,7 @@ pub fn show_routes() -> Result<(), Box> {
                 } else {
                     table.add_row(vec![
                         Cell::new(&iface.name),
-                        Cell::new(&ipv6.addr),
+                        Cell::new(&ipv6.addr()),
                         Cell::new(&ipv6.network()),
                         Cell::new(&ipv6.netmask()),
                         Cell::new(ipv6_gateway),
@@ -108,11 +108,11 @@ pub fn show_routes() -> Result<(), Box> {
                 }
             }
         } else {
-            if iface.if_type == InterfaceType::Loopback || iface.ipv6[0].addr == Ipv6Addr::LOCALHOST
+            if iface.if_type == InterfaceType::Loopback || iface.ipv6[0].addr() == Ipv6Addr::LOCALHOST
             {
                 table.add_row(vec![
                     Cell::new(iface.name),
-                    Cell::new(&iface.ipv6[0].addr),
+                    Cell::new(&iface.ipv6[0].addr()),
                     Cell::new(&iface.ipv6[0].network()),
                     Cell::new(&iface.ipv6[0].netmask()),
                     Cell::new(""),
diff --git a/ntap/src/net/interface.rs b/ntap/src/net/interface.rs
index 754181e..50ef1d9 100644
--- a/ntap/src/net/interface.rs
+++ b/ntap/src/net/interface.rs
@@ -8,12 +8,12 @@ use std::{
 pub fn get_interface_by_ip(ip_addr: IpAddr) -> Option<Interface> {
     for iface in nex::net::interface::get_interfaces() {
         for ip in iface.ipv4.clone() {
-            if ip.addr == ip_addr {
+            if ip.addr() == ip_addr {
                 return Some(iface);
             }
         }
         for ip in iface.ipv6.clone() {
-            if ip.addr == ip_addr {
+            if ip.addr() == ip_addr {
                 return Some(iface);
             }
         }
@@ -41,15 +41,15 @@ pub fn get_interface_by_name(name: String) -> Option<Interface> {

 pub fn get_interface_ipv4(iface: &Interface) -> Option<IpAddr> {
     for ip in iface.ipv4.clone() {
-        return Some(IpAddr::V4(ip.addr));
+        return Some(IpAddr::V4(ip.addr()));
     }
     return None;
 }

 pub fn get_interface_global_ipv6(iface: &Interface) -> Option<IpAddr> {
     for ip in iface.ipv6.clone() {
-        if nex::net::ip::is_global_ipv6(&ip.addr) {
-            return Some(IpAddr::V6(ip.addr));
+        if nex::net::ip::is_global_ipv6(&ip.addr()) {
+            return Some(IpAddr::V6(ip.addr()));
         }
     }
     return None;
@@ -57,8 +57,8 @@ pub fn get_interface_global_ipv6(iface: &Interface) -> Option<IpAddr> {

 pub fn get_interface_local_ipv6(iface: &Interface) -> Option<IpAddr> {
     for ip in iface.ipv6.clone() {
-        if !nex::net::ip::is_global_ipv6(&ip.addr) {
-            return Some(IpAddr::V6(ip.addr));
+        if !nex::net::ip::is_global_ipv6(&ip.addr()) {
+            return Some(IpAddr::V6(ip.addr()));
         }
     }
     return None;
@@ -67,10 +67,10 @@ pub fn get_interface_local_ipv6(iface: &Interface) -> Option<IpAddr> {
 pub fn get_interface_ips(iface: &Interface) -> Vec<String> {
     let mut ips: Vec<String> = Vec::new();
     for ip in iface.ipv4.clone() {
-        ips.push(ip.addr.to_string());
+        ips.push(ip.addr().to_string());
     }
     for ip in iface.ipv6.clone() {
-        ips.push(ip.addr.to_string());
+        ips.push(ip.addr().to_string());
     }
     ips
 }
@@ -79,10 +79,10 @@ pub fn get_local_ips(if_index: u32) -> HashSet<IpAddr> {
     let interface = get_interface_by_index(if_index).unwrap();
     let mut ips: HashSet<IpAddr> = HashSet::new();
     for ip in interface.ipv4.clone() {
-        ips.insert(IpAddr::V4(ip.addr));
+        ips.insert(IpAddr::V4(ip.addr()));
     }
     for ip in interface.ipv6.clone() {
-        ips.insert(IpAddr::V6(ip.addr));
+        ips.insert(IpAddr::V6(ip.addr()));
     }
     // localhost IP addresses
     ips.insert(IpAddr::V4(Ipv4Addr::LOCALHOST));
@@ -95,10 +95,10 @@ pub fn get_default_local_ips() -> HashSet<IpAddr> {
     let default_interface = netdev::get_default_interface().unwrap();
     let mut ips: HashSet<IpAddr> = HashSet::new();
     for ip in default_interface.ipv4.clone() {
-        ips.insert(IpAddr::V4(ip.addr));
+        ips.insert(IpAddr::V4(ip.addr()));
     }
     for ip in default_interface.ipv6.clone() {
-        ips.insert(IpAddr::V6(ip.addr));
+        ips.insert(IpAddr::V6(ip.addr()));
     }
     // localhost IP addresses
     ips.insert(IpAddr::V4(Ipv4Addr::LOCALHOST));
@@ -109,10 +109,10 @@ pub fn get_default_local_ips() -> HashSet<IpAddr> {
 pub fn get_interface_local_ips(iface: &Interface) -> HashSet<IpAddr> {
     let mut ips: HashSet<IpAddr> = HashSet::new();
     for ip in iface.ipv4.clone() {
-        ips.insert(IpAddr::V4(ip.addr));
+        ips.insert(IpAddr::V4(ip.addr()));
     }
     for ip in iface.ipv6.clone() {
-        ips.insert(IpAddr::V6(ip.addr));
+        ips.insert(IpAddr::V6(ip.addr()));
     }
     // localhost IP addresses
     ips.insert(IpAddr::V4(Ipv4Addr::LOCALHOST));
@@ -124,10 +124,10 @@ pub fn get_local_ip_map() -> HashMap<IpAddr, String> {
     let mut ip_map: HashMap<IpAddr, String> = HashMap::new();
     for iface in nex::net::interface::get_interfaces() {
         for ip in iface.ipv4.clone() {
-            ip_map.insert(IpAddr::V4(ip.addr), iface.name.clone());
+            ip_map.insert(IpAddr::V4(ip.addr()), iface.name.clone());
         }
         for ip in iface.ipv6.clone() {
-            ip_map.insert(IpAddr::V6(ip.addr), iface.name.clone());
+            ip_map.insert(IpAddr::V6(ip.addr()), iface.name.clone());
         }
     }
     ip_map
diff --git a/ntap/src/net/ip.rs b/ntap/src/net/ip.rs
index 8a54ca3..f33d24c 100644
--- a/ntap/src/net/ip.rs
+++ b/ntap/src/net/ip.rs
@@ -5,11 +5,15 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 pub fn get_network_address(ip_addr: IpAddr) -> Result {
     match ip_addr {
         IpAddr::V4(ipv4_addr) => {
-            let net: Ipv4Net = Ipv4Net::new(ipv4_addr, 24);
+            let net: Ipv4Net = Ipv4Net::new(ipv4_addr, 24).map_err(|e| {
+                format!("Invalid IPv4 prefix length : {}", e.to_string())
+            })?;
             Ok(net.network().to_string())
         }
         IpAddr::V6(ipv6_addr) => {
-            let net: Ipv6Net = Ipv6Net::new(ipv6_addr, 24);
+            let net: Ipv6Net = Ipv6Net::new(ipv6_addr, 24).map_err(|e| {
+                format!("Invalid IPv6 prefix length: {}", e.to_string())
+            })?;
             Ok(net.network().to_string())
         }
     }
 }
diff --git a/ntap/src/sys/windows.rs b/ntap/src/sys/windows.rs
index 048648c..9f228d3 100644
--- a/ntap/src/sys/windows.rs
+++ b/ntap/src/sys/windows.rs
@@ -188,31 +188,6 @@ pub fn check_deps() -> Result<(), Box> {
                 if ans == false {
                     return Err("On windows, Npcap is required for ntap to work properly. Please install Npcap and try again.".into());
                 }
-                // Download the latest release of npcap installer
-                if let Some(download_dir) = crate::sys::get_download_dir_path() {
-                    let installer_path = download_npcap_with_progress(&download_dir)?;
-                    println!(
-                        "Npcap installer downloaded successfully: {}",
-                        installer_path.to_string_lossy()
-                    );
-                    // Install npcap
-                    println!("Installing Npcap ...");
-                    // Verify the checksum of the downloaded npcap installer
-                    match crate::deps::verify_installer_checksum(&installer_path) {
-                        Ok(_) => println!("Npcap installer checksum is correct !"),
-                        Err(e) => {
-                            println!("{}", e);
-                        }
-                    }
-                    // Install npcap
-                    match crate::deps::run_npcap_installer(&installer_path) {
-                        Ok(_) => println!("Npcap installed successfully !"),
-                        Err(e) => {
-                            println!("{}", e);
-                        }
-                    }
-                    println!("Npcap installed successfully.");
-                }
             }
         }
         crate::deps::DepsError::Unknown(s) => {
@@ -223,45 +198,3 @@ pub fn check_deps() -> Result<(), Box> {
     }
     Ok(())
 }
-
-/// Download npcap installer with progress
-pub fn download_npcap_with_progress(
-    dst_dir_path: &PathBuf,
-) -> Result> {
-    let npcap_installer_url = format!("{}{}", NPCAP_DIST_BASE_URL, NPCAP_INSTALLER_FILENAME);
-    // Check and create download dir
-    if !dst_dir_path.exists() {
-        std::fs::create_dir_all(&dst_dir_path)?;
-    }
-    let npcap_target_path: std::path::PathBuf = dst_dir_path.join(NPCAP_INSTALLER_FILENAME);
-    // Download npcap installer if not exists
-    if std::path::Path::new(&npcap_target_path).exists() {
-        return Ok(npcap_target_path);
-    }
-    let rt = tokio::runtime::Runtime::new().unwrap();
-    let installer_save_path: PathBuf = npcap_target_path.clone();
-    rt.block_on(async {
-        // create a channel for progress
-        let (progress_tx, mut progress_rx) = tokio::sync::mpsc::channel(100);
-        // spawn a task to handle the progress
-        tokio::spawn(async move {
-            let _ = crate::net::http::download_file_with_progress(npcap_installer_url, installer_save_path, progress_tx).await;
-        });
-        // Display progress with indicatif
-        let bar = indicatif::ProgressBar::new(1000);
-        bar.set_style(indicatif::ProgressStyle::default_bar().template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({eta})").progress_chars("#>-"));
-        while let Some(progress) = progress_rx.recv().await {
-            match progress {
-                crate::net::http::DownloadProgress::ContentLength(content_length) => {
-                    println!("Content-Length: {}", content_length);
-                    bar.set_length(content_length);
-                }
-                crate::net::http::DownloadProgress::Downloaded(downloaded) => {
-                    bar.set_position(downloaded);
-                }
-            }
-        }
-        bar.finish();
-    });
-    Ok(npcap_target_path)
-}
diff --git a/ntap/src/tui/live/app.rs b/ntap/src/tui/live/app.rs
index f37ad84..0b710d4 100644
--- a/ntap/src/tui/live/app.rs
+++ b/ntap/src/tui/live/app.rs
@@ -21,7 +21,7 @@ pub struct TabsState<'a> {
 }

 impl<'a> TabsState<'a> {
-    pub fn new(titles: Vec<&'a str>) -> TabsState {
+    pub fn new(titles: Vec<&'a str>) -> TabsState<'a> {
         TabsState { titles, index: 0 }
     }
     pub fn next(&mut self) {
diff --git a/ntap/src/tui/monitor/app.rs b/ntap/src/tui/monitor/app.rs
index f0b0b04..cdbed5c 100644
--- a/ntap/src/tui/monitor/app.rs
+++ b/ntap/src/tui/monitor/app.rs
@@ -18,7 +18,7 @@ pub struct TabsState<'a> {
 }

 impl<'a> TabsState<'a> {
-    pub fn new(titles: Vec<&'a str>) -> TabsState {
+    pub fn new(titles: Vec<&'a str>) -> TabsState<'a> {
         TabsState { titles, index: 0 }
     }
     pub fn next(&mut self) {
diff --git a/ntap/src/tui/stat/app.rs b/ntap/src/tui/stat/app.rs
index 60ba5e3..647cf55 100644
--- a/ntap/src/tui/stat/app.rs
+++ b/ntap/src/tui/stat/app.rs
@@ -18,7 +18,7 @@ pub struct TabsState<'a> {
 }

 impl<'a> TabsState<'a> {
-    pub fn new(titles: Vec<&'a str>) -> TabsState {
+    pub fn new(titles: Vec<&'a str>) -> TabsState<'a> {
         TabsState { titles, index: 0 }
     }
     pub fn next(&mut self) {
diff --git a/resources/ss/ntap-ss-connections.png b/resources/ss/ntap-ss-connections.png
deleted file mode 100644
index 315511c..0000000
Binary files a/resources/ss/ntap-ss-connections.png and /dev/null differ
diff --git a/resources/ss/ntap-ss-live.png b/resources/ss/ntap-ss-live.png
deleted file mode 100644
index ec7e600..0000000
Binary files a/resources/ss/ntap-ss-live.png and /dev/null differ
diff --git a/resources/ss/ntap-ss-monitor.png b/resources/ss/ntap-ss-monitor.png
deleted file mode 100644
index 8199826..0000000
Binary files a/resources/ss/ntap-ss-monitor.png and /dev/null differ
diff --git a/resources/ss/ntap-ss-remote-hosts.png b/resources/ss/ntap-ss-remote-hosts.png
deleted file mode 100644
index aa517a9..0000000
Binary files a/resources/ss/ntap-ss-remote-hosts.png and /dev/null differ
diff --git a/resources/ss/ntap-ss-stat.png b/resources/ss/ntap-ss-stat.png
deleted file mode 100644
index 6201f08..0000000
Binary files a/resources/ss/ntap-ss-stat.png and /dev/null differ
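
Note (outside the patch): most of the Rust churn above is mechanical fallout from the netdev 0.30 -> 0.34 / nex 0.18 -> 0.19 upgrade, where interface addresses moved from public `addr` fields to `addr()` accessor methods. A minimal sketch of the new call style, with the `netdev::Interface` path and the `ipv4`/`ipv6` field layout assumed from the hunks above rather than checked against the crate docs:

    use std::net::IpAddr;

    // Hypothetical helper: collect every address of an interface through the
    // accessor methods (`v4.addr()` / `v6.addr()`), mirroring the
    // `.addr` -> `.addr()` rewrites in ntap/src/net/interface.rs above.
    fn collect_ips(iface: &netdev::Interface) -> Vec<IpAddr> {
        let mut ips = Vec::new();
        for v4 in &iface.ipv4 {
            ips.push(IpAddr::V4(v4.addr())); // was: v4.addr
        }
        for v6 in &iface.ipv6 {
            ips.push(IpAddr::V6(v6.addr())); // was: v6.addr
        }
        ips
    }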
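
The ntap/src/net/ip.rs hunk stops treating `Ipv4Net::new` / `Ipv6Net::new` as infallible and handles their `Result` instead (ipnet reports an error when the prefix length is out of range for the address family). A standalone sketch of the same pattern, using a hypothetical `network_of` helper rather than the patch's `get_network_address`:

    use ipnet::Ipv4Net;
    use std::net::Ipv4Addr;

    // Map the prefix-length error into a readable message instead of unwrapping.
    fn network_of(addr: Ipv4Addr, prefix: u8) -> Result<String, String> {
        let net = Ipv4Net::new(addr, prefix)
            .map_err(|e| format!("invalid IPv4 prefix length: {}", e))?;
        Ok(net.network().to_string())
    }

    // Example: network_of(Ipv4Addr::new(192, 168, 10, 42), 24) yields Ok("192.168.10.0"),
    // while a prefix of 33 yields the mapped error instead of a panic.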
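
The three TabsState hunks only make the previously elided lifetime in the constructor's return type explicit; behaviour is unchanged. Illustrated on a stand-in struct (not the app code):

    struct Titles<'a> {
        titles: Vec<&'a str>,
        index: usize,
    }

    impl<'a> Titles<'a> {
        // `-> Titles` also compiles via lifetime elision, but it hides that the
        // returned value borrows the string slices passed in; writing
        // `Titles<'a>` (or `Self`) keeps that borrow explicit in the signature.
        fn new(titles: Vec<&'a str>) -> Titles<'a> {
            Titles { titles, index: 0 }
        }
    }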