From 952a4263a9da95c4a0dc098650ae27b208515cd5 Mon Sep 17 00:00:00 2001 From: moth Date: Mon, 24 Nov 2025 12:59:04 -0700 Subject: [PATCH 1/5] Install Ansible with Pipx (#84) * Install Ansible using pipx instead of Linux package manager * Prepend ~/.local/bin to path if not present (needed on Ubuntu to find pipx-installed Ansible) * Install pipx in a virtual environment to avoid PEP 668 issues * Pin ansible-core version 2.15.3 to avoid deprecation warnings and breaking syntax changes * Pin ansible-core 2.15.13, force installation --- .../install_scripts/ansible-installer.sh | 28 +++++++++++++++---- installer/install_scripts/install_rita.sh | 3 ++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/installer/install_scripts/ansible-installer.sh b/installer/install_scripts/ansible-installer.sh index 6a18b8c..539c326 100755 --- a/installer/install_scripts/ansible-installer.sh +++ b/installer/install_scripts/ansible-installer.sh @@ -219,6 +219,24 @@ install_tool() { fi } +install_ansible() { + # install pipx package in a Python virtual environment (PEP 668 mitigation) + python3 -m venv .ansenv + source .ansenv/bin/activate + + python3 -m pip install pipx + + pipx ensurepath --prepend + + # install ansible and ansible-core via pipx + pipx install ansible ansible-core==2.15.13 --force + + deactivate + + # prepend ~/.local/bin to path if not present + [[ ":$PATH:" != *":$HOME/.local/bin:"* ]] && PATH="$HOME/.local/bin:${PATH}" +} + echo "ansible_installer version $ansible_installer_version" >&2 #FIXME We no longer need these choices, remove the following block @@ -272,6 +290,7 @@ else status "Installing needed tools" #================ install_tool python3 "python3" install_tool pip3 "python3-pip" #Note, oracle linux does not come with pip at all. The "python3-pip-wheel" package does not include pip. + install_tool venv "python3-venv" python3 -m pip -V ; retcode="$?" if [ "$retcode" != 0 ]; then fail "Unable to run python3's pip, exiting." 
@@ -281,7 +300,8 @@ else install_tool wget "wget" install_tool curl "curl" install_tool sha256sum "coreutils" - install_tool ansible "ansible ansible-core" + + install_ansible fi @@ -309,9 +329,7 @@ if ! echo "$PATH" | grep -q '/usr/local/bin' ; then fi fi -ansible-galaxy collection install community.docker --force - - - +# install requisite ansible collections +ansible-galaxy collection install community.general community.docker --force popd > /dev/null diff --git a/installer/install_scripts/install_rita.sh b/installer/install_scripts/install_rita.sh index 36ecbf4..89ed4f3 100755 --- a/installer/install_scripts/install_rita.sh +++ b/installer/install_scripts/install_rita.sh @@ -31,6 +31,9 @@ source ./scripts/helper.sh ./scripts/ansible-installer.sh +# prepend ~/.local/bin to path if not present +[[ ":$PATH:" != *":$HOME/.local/bin:"* ]] && PATH="$HOME/.local/bin:${PATH}" + status "Installing rita via ansible on $install_target" #================ if [ "$install_target" = "localhost" -o "$install_target" = "127.0.0.1" -o "$install_target" = "::1" ]; then From fd1bbe326310641720970815af0d0a41313390cd Mon Sep 17 00:00:00 2001 From: Naomi Kramer Date: Wed, 3 Dec 2025 15:54:03 -0500 Subject: [PATCH 2/5] Disable permission flag checks on imported logs (#82) * Disable permission flag checks on imported logs * Display individual log errors even if there are no viable logs --- cmd/import.go | 15 ++++++++------ cmd/import_test.go | 50 +++++++++++++++++++++++++++------------------- 2 files changed, 39 insertions(+), 26 deletions(-) diff --git a/cmd/import.go b/cmd/import.go index 4299ddc..33cbf8e 100644 --- a/cmd/import.go +++ b/cmd/import.go @@ -161,15 +161,19 @@ func RunImportCmd(startTime time.Time, cfg *config.Config, afs afero.Fs, logDir // get list of hourly log maps of all days of log files in directory logMap, walkErrors, err := WalkFiles(afs, logDir, db.Rolling) - if err != nil { - return importResults, err - } - // log any errors that occurred during the walk + // 
log any errors that occurred during the walk, before returning + // this is especially useful when all files in the directory are invalid + // instead of only logging 'no valid files found' for _, walkErr := range walkErrors { logger.Debug().Str("path", walkErr.Path).Err(walkErr.Error).Msg("file was left out of import due to error or incompatibility") } + // return if the walk failed completely + if err != nil { + return importResults, err + } + var elapsedTime int64 // loop through each day @@ -425,8 +429,7 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal } // check if the file is readable - _, err := afs.Open(path) - if err != nil || !(info.Mode().Perm()&0444 == 0444) { + if _, err := afs.Open(path); err != nil { walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrInsufficientReadPermissions}) return nil //nolint:nilerr // log the issue and continue walking } diff --git a/cmd/import_test.go b/cmd/import_test.go index 0ef1c65..09ff067 100644 --- a/cmd/import_test.go +++ b/cmd/import_test.go @@ -622,7 +622,6 @@ func createExpectedResults(logs []cmd.HourlyZeekLogs) []cmd.HourlyZeekLogs { } func TestWalkFiles(t *testing.T) { - afs := afero.NewMemMapFs() tests := []struct { name string @@ -1159,25 +1158,33 @@ func TestWalkFiles(t *testing.T) { }, expectedError: cmd.ErrNoValidFilesFound, }, - { - name: "No Read Permissions on Files", - directory: "/logs", - directoryPermissions: iofs.FileMode(0o775), - filePermissions: iofs.FileMode(0o000), - files: []string{ - "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", - }, - expectedWalkErrors: []cmd.WalkError{ - {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_conn.log", Error: 
cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - }, - expectedError: cmd.ErrNoValidFilesFound, - }, + + // Previously, read permissions were checked with !(info.Mode().Perm()&0444 == 0444), but + // this requires all read permissions (user, group, others)/0644 to be set which is not ideal. + // A better check would be to see if any read permission is set, i.e., (info.Mode().Perm()&0444 != 0). + // However, since some ACL systems/SELinux might interfere with this, it's better to let the Open() call + // return an error if permission is denied. + // Unfortunately, afero.MemMapFs does not support file permissions when using Open, so this test is skipped. + // https://github.com/spf13/afero/issues/150 + // { + // name: "No Read Permissions on Files", + // directory: "/logs", + // directoryPermissions: iofs.FileMode(0o775), + // filePermissions: iofs.FileMode(0o000), + // files: []string{ + // "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", + // }, + // expectedWalkErrors: []cmd.WalkError{ + // {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_conn.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + // }, + // expectedError: cmd.ErrNoValidFilesFound, + // }, { name: "No Files, Only SubDirectories", directory: "/logs", @@ -1217,6 +1224,8 @@ func TestWalkFiles(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // create a new in-memory 
filesystem for each test + afs := afero.NewMemMapFs() // Create the directory if test.directory != "" { @@ -1298,6 +1307,7 @@ func TestWalkFiles(t *testing.T) { if test.expectedError == nil { require.NoError(t, err, "running WalkFiles should not produce an error") } else { + require.Error(t, err, "running WalkFiles should produce an error") require.ErrorIs(t, err, test.expectedError, "error should match expected value") } From 63b6e3d6e7fb55df4f9d43c618111ce3b9514a86 Mon Sep 17 00:00:00 2001 From: Liza Tsibur Date: Tue, 9 Dec 2025 13:22:09 -0700 Subject: [PATCH 3/5] Revert "Disable permission flag checks on imported logs (#82)" (#85) This reverts commit fd1bbe326310641720970815af0d0a41313390cd. --- cmd/import.go | 15 ++++++-------- cmd/import_test.go | 50 +++++++++++++++++++--------------------------- 2 files changed, 26 insertions(+), 39 deletions(-) diff --git a/cmd/import.go b/cmd/import.go index 33cbf8e..4299ddc 100644 --- a/cmd/import.go +++ b/cmd/import.go @@ -161,19 +161,15 @@ func RunImportCmd(startTime time.Time, cfg *config.Config, afs afero.Fs, logDir // get list of hourly log maps of all days of log files in directory logMap, walkErrors, err := WalkFiles(afs, logDir, db.Rolling) + if err != nil { + return importResults, err + } - // log any errors that occurred during the walk, before returning - // this is especially useful when all files in the directory are invalid - // instead of only logging 'no valid files found' + // log any errors that occurred during the walk for _, walkErr := range walkErrors { logger.Debug().Str("path", walkErr.Path).Err(walkErr.Error).Msg("file was left out of import due to error or incompatibility") } - // return if the walk failed completely - if err != nil { - return importResults, err - } - var elapsedTime int64 // loop through each day @@ -429,7 +425,8 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal } // check if the file is readable - if _, err := afs.Open(path); err != nil { + _, 
err := afs.Open(path) + if err != nil || !(info.Mode().Perm()&0444 == 0444) { walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrInsufficientReadPermissions}) return nil //nolint:nilerr // log the issue and continue walking } diff --git a/cmd/import_test.go b/cmd/import_test.go index 09ff067..0ef1c65 100644 --- a/cmd/import_test.go +++ b/cmd/import_test.go @@ -622,6 +622,7 @@ func createExpectedResults(logs []cmd.HourlyZeekLogs) []cmd.HourlyZeekLogs { } func TestWalkFiles(t *testing.T) { + afs := afero.NewMemMapFs() tests := []struct { name string @@ -1158,33 +1159,25 @@ func TestWalkFiles(t *testing.T) { }, expectedError: cmd.ErrNoValidFilesFound, }, - - // Previously, read permissions were checked with !(info.Mode().Perm()&0444 == 0444), but - // this requires all read permissions (user, group, others)/0644 to be set which is not ideal. - // A better check would be to see if any read permission is set, i.e., (info.Mode().Perm()&0444 != 0). - // However, since some ACL systems/SELinux might interfere with this, it's better to let the Open() call - // return an error if permission is denied. - // Unfortunately, afero.MemMapFs does not support file permissions when using Open, so this test is skipped. 
- // https://github.com/spf13/afero/issues/150 - // { - // name: "No Read Permissions on Files", - // directory: "/logs", - // directoryPermissions: iofs.FileMode(0o775), - // filePermissions: iofs.FileMode(0o000), - // files: []string{ - // "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", - // }, - // expectedWalkErrors: []cmd.WalkError{ - // {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/open_conn.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, - // {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - // }, - // expectedError: cmd.ErrNoValidFilesFound, - // }, + { + name: "No Read Permissions on Files", + directory: "/logs", + directoryPermissions: iofs.FileMode(0o775), + filePermissions: iofs.FileMode(0o000), + files: []string{ + "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", + }, + expectedWalkErrors: []cmd.WalkError{ + {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/open_conn.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, + {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + }, + expectedError: cmd.ErrNoValidFilesFound, + }, { name: "No Files, Only SubDirectories", directory: "/logs", @@ -1224,8 +1217,6 @@ func TestWalkFiles(t *testing.T) { for _, test := range 
tests { t.Run(test.name, func(t *testing.T) { - // create a new in-memory filesystem for each test - afs := afero.NewMemMapFs() // Create the directory if test.directory != "" { @@ -1307,7 +1298,6 @@ func TestWalkFiles(t *testing.T) { if test.expectedError == nil { require.NoError(t, err, "running WalkFiles should not produce an error") } else { - require.Error(t, err, "running WalkFiles should produce an error") require.ErrorIs(t, err, test.expectedError, "error should match expected value") } From ee844663e4bec173725f0ad6040e3e6f4999c41b Mon Sep 17 00:00:00 2001 From: Keith Chew <40710037+mrkeithchew@users.noreply.github.com> Date: Wed, 31 Dec 2025 15:59:24 -0500 Subject: [PATCH 4/5] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c38e27e..171a69f 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,9 @@ If you get value out of RITA and would like to go a step further with hunting automation, futuristic visualizations, and data enrichment, then take a look at [AC-Hunter](https://www.activecountermeasures.com/). -Sponsored by [Active Countermeasures](https://activecountermeasures.com/). +Brought to you by [Active Countermeasures](https://activecountermeasures.com/). + +Sponsored by [BHIS](https://www.blackhillsinfosec.com/). --- @@ -123,4 +125,4 @@ Check the value of the `"$TERM"` variable, this should be `xterm-256color`. If i Depending on the color theme of your terminal, the TUI will adjust to either a light mode or a dark mode. -If you're really fancy and like pretty colors, consider using the [Catpuccin](https://catppuccin.com/ports?q=terminal) theme! \ No newline at end of file +If you're really fancy and like pretty colors, consider using the [Catpuccin](https://catppuccin.com/ports?q=terminal) theme! 
From 2bd5475e7666531ca74ab5fa552cf39df4951b72 Mon Sep 17 00:00:00 2001 From: Liza Tsibur Date: Tue, 20 Jan 2026 17:50:56 -0700 Subject: [PATCH 5/5] Installer and Import Logic Updates (#90) * Disable permission flag checks on imported logs (#86) * Disable permission flag checks on imported logs * Display individual log errors even if there are no viable logs --------- Co-authored-by: Naomi Kramer * Installer Import Logic Updates (#89) * rename sshprep to sshprep.sh * installer updates removed mass upgrades, cleaned up installer, removed references to unsupported OS's, improved error handling and usage messages * threat intel feed error handling * cleaned up installer scripts and improved helper functions * Update ansible-installer.sh --------- Co-authored-by: Naomi Kramer --- .gitignore | 1 - .vscode/rita.code-workspace | 36 +- README.md | 2 +- cmd/import.go | 39 +- cmd/import_test.go | 66 +-- database/server.go | 7 +- database/threat_intel.go | 137 ++++-- database/threat_intel_test.go | 320 +++++++++++++ default_config.hjson | 12 +- installer/Installer.md | 3 +- installer/build_image.sh | 5 - installer/generate_installer.sh | 149 +++--- ...-here-tmp.sh => install-rita-zeek-here.sh} | 21 +- .../install_scripts/ansible-installer.sh | 436 ++++++++---------- installer/install_scripts/helper.sh | 188 +++++++- installer/install_scripts/install_post.yml | 63 --- installer/install_scripts/install_pre.yml | 321 ++----------- installer/install_scripts/install_rita.sh | 97 ++-- installer/install_scripts/install_rita.yml | 43 +- installer/install_scripts/install_zeek.yml | 39 +- .../install_scripts/{sshprep => sshprep.sh} | 9 +- installer/rita-install.md | 66 --- installer/run_dev.sh | 40 +- installer/test_installed.sh | 35 +- integration_rolling/init_ch.sh | 2 +- util/util.go | 5 + 26 files changed, 1134 insertions(+), 1008 deletions(-) delete mode 100755 installer/build_image.sh rename installer/{install-rita-zeek-here-tmp.sh => install-rita-zeek-here.sh} (57%) delete mode 
100644 installer/install_scripts/install_post.yml rename installer/install_scripts/{sshprep => sshprep.sh} (98%) delete mode 100644 installer/rita-install.md diff --git a/.gitignore b/.gitignore index e5df6a5..9b366a7 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,6 @@ /installer/stage /installer/rita-*.tar.gz /installer/rita-* -/installer/install-rita-zeek-here.sh !/installer/*.md # only commit .env, .env.production, and test.env files diff --git a/.vscode/rita.code-workspace b/.vscode/rita.code-workspace index 73b0a79..2ea3d52 100644 --- a/.vscode/rita.code-workspace +++ b/.vscode/rita.code-workspace @@ -6,15 +6,18 @@ ], "extensions": { "recommendations": [ + "golang.go", "qufiwefefwoyn.inline-sql-syntax", "wayou.vscode-todo-highlight", - "golang.go", - "hjson.hjson", - "Tanh.hjson-formatter", + "github.vscode-github-actions", "redhat.vscode-yaml", + "shd101wyy.markdown-preview-enhanced", + "laktak.hjson", + "Tanh.hjson-formatter", ] }, "settings": { + // === Go === "go.languageServerExperimentalFeatures": { "diagnostics": false }, @@ -31,6 +34,8 @@ "editor.defaultFormatter": "golang.go", "editor.formatOnSave": true }, + + // === JSON === "[json]": { "editor.defaultFormatter": "vscode.json-language-features", "editor.formatOnSave": true @@ -39,22 +44,39 @@ "editor.defaultFormatter": "vscode.json-language-features", "editor.formatOnSave": true }, + "[hjson]": { "editor.defaultFormatter": "Tanh.hjson-formatter", "editor.formatOnSave": true }, + // settings for Tanh.hjson-formatter extension "hjson-formatter.options": { + "condense": 0, "separator": true, - "condense": 100, "bracesSameLine": true, - "emitRootBraces": true, - "quotes": "strings", + "emitRootBraces": false, + "multiline": "no-tabs", "space": 4, - "eol": "auto", + "eol": "auto" }, + + // === YAML === "[yaml]": { "editor.defaultFormatter": "redhat.vscode-yaml", "editor.formatOnSave": true, + "editor.tabSize": 2 + }, + + "[github-actions-workflow]": { + "editor.defaultFormatter": 
"redhat.vscode-yaml", + "editor.formatOnSave": true, + "editor.tabSize": 2, + }, + + "[dockercompose]": { + "editor.defaultFormatter": "redhat.vscode-yaml", + "editor.formatOnSave": true, + "editor.tabSize": 2, } } } \ No newline at end of file diff --git a/README.md b/README.md index 171a69f..552f51f 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,7 @@ rita import --database=mydatabase --logs=~/mylogs `logs` is the path to the Zeek logs you wish to import -For datasets that should accumulate data over time, with the logs containing network info that is current (less than 24 hours old), use the `--rolling` flag during creation and each subsequent import into the dataset. The most common use case for this is importing logs from the a Zeek sensor on a cron job each hour. +For datasets that should accumulate data over time, with the logs containing network info that is current (less than 24 hours old), use the `--rolling` flag during creation and each subsequent import into the dataset. The most common use case for this is importing logs from a Zeek sensor on a cron job each hour. Note: For datasets that contain over 24 hours of logs, but are over 24 hours old, simply import the top-level directory of the set of logs **without** the `--rolling` flag. Importing these logs with the `--rolling` flag may result in incorrect results. 
diff --git a/cmd/import.go b/cmd/import.go index 4299ddc..81c8024 100644 --- a/cmd/import.go +++ b/cmd/import.go @@ -46,10 +46,6 @@ var ErrIncompatibleFileExtension = errors.New("incompatible file extension") var ErrSkippedDuplicateLog = errors.New("encountered file with same name but different extension, skipping file due to older last modified time") var ErrMissingLogDirectory = errors.New("log directory flag is required") -type WalkError struct { - Path string - Error error -} type HourlyZeekLogs []map[string][]string var ImportCommand = &cli.Command{ @@ -161,15 +157,19 @@ func RunImportCmd(startTime time.Time, cfg *config.Config, afs afero.Fs, logDir // get list of hourly log maps of all days of log files in directory logMap, walkErrors, err := WalkFiles(afs, logDir, db.Rolling) - if err != nil { - return importResults, err - } - // log any errors that occurred during the walk + // log any errors that occurred during the walk, before returning + // this is especially useful when all files in the directory are invalid + // instead of only logging 'no valid files found' for _, walkErr := range walkErrors { logger.Debug().Str("path", walkErr.Path).Err(walkErr.Error).Msg("file was left out of import due to error or incompatibility") } + // return if the walk failed completely + if err != nil { + return importResults, err + } + var elapsedTime int64 // loop through each day @@ -381,7 +381,7 @@ func ParseFolderDate(folder string) (time.Time, error) { // WalkFiles starts a goroutine to walk the directory tree at root and send the // path of each regular file on the string channel. It sends the result of the // walk on the error channel. If done is closed, WalkFiles abandons its work. 
-func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []WalkError, error) { +func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []util.WalkError, error) { // check if root is a valid directory or file err := util.ValidateDirectory(afs, root) if err != nil && !errors.Is(err, util.ErrPathIsNotDir) { @@ -403,13 +403,13 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal } fTracker := make(map[string]fileTrack) - var walkErrors []WalkError + var walkErrors []util.WalkError err = afero.Walk(afs, root, func(path string, info os.FileInfo, afErr error) error { // check if afero failed to access or find a file or directory if afErr != nil { - walkErrors = append(walkErrors, WalkError{Path: path, Error: afErr}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: afErr}) return nil //nolint:nilerr // log the issue and continue walking } @@ -420,14 +420,13 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal // skip if file is not a compatible log file if !(strings.HasSuffix(path, ".log") || strings.HasSuffix(path, ".gz")) { - walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrIncompatibleFileExtension}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: ErrIncompatibleFileExtension}) return nil // log the issue and continue walking } // check if the file is readable - _, err := afs.Open(path) - if err != nil || !(info.Mode().Perm()&0444 == 0444) { - walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrInsufficientReadPermissions}) + if _, err := afs.Open(path); err != nil { + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: ErrInsufficientReadPermissions}) return nil //nolint:nilerr // log the issue and continue walking } @@ -454,7 +453,7 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal case exists && fileData.lastModified.UTC().Before(info.ModTime().UTC()): // 
warn the user so that this isn't a silent operation - walkErrors = append(walkErrors, WalkError{Path: fTracker[trimmedFileName].path, Error: ErrSkippedDuplicateLog}) + walkErrors = append(walkErrors, util.WalkError{Path: fTracker[trimmedFileName].path, Error: ErrSkippedDuplicateLog}) // logger.Warn().Str("original_path", fTracker[trimmedFileName].path).Str("replacement_path", path).Msg("encountered file with same name but different extension, potential duplicate log, skipping") fTracker[trimmedFileName] = fileTrack{ @@ -463,7 +462,7 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal } // if the current file is older than the one we have already seen or no other conditions are met, skip it default: - walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrSkippedDuplicateLog}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: ErrSkippedDuplicateLog}) } @@ -497,21 +496,21 @@ func WalkFiles(afs afero.Fs, root string, rolling bool) ([]HourlyZeekLogs, []Wal case strings.HasPrefix(filepath.Base(path), c.OpenSSLPrefix): prefix = c.OpenSSLPrefix default: // skip file if it doesn't match any of the accepted prefixes - walkErrors = append(walkErrors, WalkError{Path: path, Error: ErrInvalidLogType}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: ErrInvalidLogType}) continue } // parse the hour from the filename hour, err := ParseHourFromFilename(file.path) if err != nil { - walkErrors = append(walkErrors, WalkError{Path: path, Error: err}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: err}) continue } parentDir := filepath.Base(filepath.Dir(file.path)) folderDate, err := ParseFolderDate(parentDir) if err != nil { - walkErrors = append(walkErrors, WalkError{Path: path, Error: err}) + walkErrors = append(walkErrors, util.WalkError{Path: path, Error: err}) } // Check if the entry for the day exists, if not, initialize it diff --git a/cmd/import_test.go b/cmd/import_test.go index 
0ef1c65..b660c00 100644 --- a/cmd/import_test.go +++ b/cmd/import_test.go @@ -622,7 +622,6 @@ func createExpectedResults(logs []cmd.HourlyZeekLogs) []cmd.HourlyZeekLogs { } func TestWalkFiles(t *testing.T) { - afs := afero.NewMemMapFs() tests := []struct { name string @@ -632,7 +631,7 @@ func TestWalkFiles(t *testing.T) { subdirectories []string files []string expectedFiles []cmd.HourlyZeekLogs - expectedWalkErrors []cmd.WalkError + expectedWalkErrors []util.WalkError rolling bool expectedError error }{ @@ -661,7 +660,7 @@ func TestWalkFiles(t *testing.T) { }, }, }), - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs/.DS_STORE", Error: cmd.ErrIncompatibleFileExtension}, {Path: "/logs/capture_loss.16:00:00-17:00:00.log.gz", Error: cmd.ErrInvalidLogType}, {Path: "/logs/stats.16:00:00-17:00:00.log.gz", Error: cmd.ErrInvalidLogType}, @@ -1062,7 +1061,7 @@ func TestWalkFiles(t *testing.T) { }, }, }), - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs_dupe/conn.log", Error: cmd.ErrSkippedDuplicateLog}, }, expectedError: nil, @@ -1083,7 +1082,7 @@ func TestWalkFiles(t *testing.T) { }, }, }), - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs_dupe/conn.log.gz", Error: cmd.ErrSkippedDuplicateLog}, }, expectedError: nil, @@ -1096,7 +1095,7 @@ func TestWalkFiles(t *testing.T) { files: []string{ ".log.gz", ".log", ".foo", }, - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs/.log", Error: cmd.ErrInvalidLogType}, {Path: "/logs/.log.gz", Error: cmd.ErrSkippedDuplicateLog}, {Path: "/logs/.foo", Error: cmd.ErrIncompatibleFileExtension}, @@ -1113,7 +1112,7 @@ func TestWalkFiles(t *testing.T) { ".conn", ".conn_", ".dns", ".dns_", ".http", ".http_", ".ssl", ".ssl_", ".bing", "._bong", "dns_file", }, - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs/conn", Error: 
cmd.ErrIncompatibleFileExtension}, {Path: "/logs/dns", Error: cmd.ErrIncompatibleFileExtension}, {Path: "/logs/http", Error: cmd.ErrIncompatibleFileExtension}, @@ -1145,7 +1144,7 @@ func TestWalkFiles(t *testing.T) { "files.log", "ntp.log", "radius.log", "sip.log", "x509.log.gz", "dhcp.log", "weird.log", "conn_summary.log", "conn-summary.log", "foo.log", }, - expectedWalkErrors: []cmd.WalkError{ + expectedWalkErrors: []util.WalkError{ {Path: "/logs/files.log", Error: cmd.ErrInvalidLogType}, {Path: "/logs/ntp.log", Error: cmd.ErrInvalidLogType}, {Path: "/logs/radius.log", Error: cmd.ErrInvalidLogType}, @@ -1159,25 +1158,33 @@ func TestWalkFiles(t *testing.T) { }, expectedError: cmd.ErrNoValidFilesFound, }, - { - name: "No Read Permissions on Files", - directory: "/logs", - directoryPermissions: iofs.FileMode(0o775), - filePermissions: iofs.FileMode(0o000), - files: []string{ - "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", - }, - expectedWalkErrors: []cmd.WalkError{ - {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_conn.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, - {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, - }, - expectedError: cmd.ErrNoValidFilesFound, - }, + + // Previously, read permissions were checked with !(info.Mode().Perm()&0444 == 0444), but + // this requires all read permissions (user, group, others)/0644 to be set which is not ideal. + // A better check would be to see if any read permission is set, i.e., (info.Mode().Perm()&0444 != 0). 
+ // However, since some ACL systems/SELinux might interfere with this, it's better to let the Open() call + // return an error if permission is denied. + // Unfortunately, afero.MemMapFs does not support file permissions when using Open, so this test is skipped. + // https://github.com/spf13/afero/issues/150 + // { + // name: "No Read Permissions on Files", + // directory: "/logs", + // directoryPermissions: iofs.FileMode(0o775), + // filePermissions: iofs.FileMode(0o000), + // files: []string{ + // "conn.log", "dns.log", "http.log", "ssl.log", "open_conn.log", "open_http.log", "open_ssl.log", + // }, + // expectedWalkErrors: []util.WalkError{ + // {Path: "/logs/conn.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/dns.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/http.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_conn.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_http.log", Error: cmd.ErrInsufficientReadPermissions}, + // {Path: "/logs/open_ssl.log", Error: cmd.ErrInsufficientReadPermissions}, + // }, + // expectedError: cmd.ErrNoValidFilesFound, + // }, { name: "No Files, Only SubDirectories", directory: "/logs", @@ -1217,6 +1224,8 @@ func TestWalkFiles(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // create a new in-memory filesystem for each test + afs := afero.NewMemMapFs() // Create the directory if test.directory != "" { @@ -1283,7 +1292,7 @@ func TestWalkFiles(t *testing.T) { // walk the directory var logMap []cmd.HourlyZeekLogs - var walkErrors []cmd.WalkError + var walkErrors []util.WalkError var err error // since some of the tests are for files passed in to the import command instead of the root directory, we need to @@ -1298,6 +1307,7 @@ func TestWalkFiles(t *testing.T) { if test.expectedError == nil { require.NoError(t, err, "running WalkFiles 
should not produce an error") } else { + require.Error(t, err, "running WalkFiles should produce an error") require.ErrorIs(t, err, test.expectedError, "error should match expected value") } diff --git a/database/server.go b/database/server.go index 1da6aa8..18ecbc6 100644 --- a/database/server.go +++ b/database/server.go @@ -71,26 +71,31 @@ func SetUpNewImport(afs afero.Fs, cfg *config.Config, dbName string, rollingFlag logger.Info().Str("database", dbName).Msg("Successfully rebuilt import database") } + // check rolling status of database rolling, err := server.checkRolling(dbName, rollingFlag, rebuildFlag) if err != nil { return nil, err } + // create sensor database db, err := server.createSensorDatabase(cfg, dbName, rolling) if err != nil { return nil, err } + // reset temporary tables err = db.ResetTemporaryTables() if err != nil { return nil, err } + // sync threat intel feeds from config err = server.syncThreatIntelFeedsFromConfig(afs, cfg) if err != nil { return nil, err } + // import valid MIME types err = server.importValidMIMETypes(cfg) if err != nil { return nil, err @@ -101,7 +106,7 @@ func SetUpNewImport(afs afero.Fs, cfg *config.Config, dbName string, rollingFlag // return nil, err // } - // // set rolling flag + // set rolling flag db.Rolling = rollingFlag // set rebuild flag diff --git a/database/threat_intel.go b/database/threat_intel.go index 08ae7d8..4ff9fba 100644 --- a/database/threat_intel.go +++ b/database/threat_intel.go @@ -3,7 +3,7 @@ package database import ( "bufio" "context" - "errors" + "fmt" "io" "net/http" "net/netip" @@ -83,9 +83,12 @@ func (server *ServerConn) syncThreatIntelFeedsFromConfig(afs afero.Fs, cfg *conf logger := zlog.GetLogger() // get the list of threat intel feeds from the config - feeds, err := getThreatIntelFeeds(afs, cfg) + feeds, walkErrs, err := getThreatIntelFeeds(afs, cfg) if err != nil { - return err + logger.Warn().Err(err).Str("directory", cfg.Env.ThreatIntelCustomFeedsDirectory).Msg("[THREAT INTEL] Failed 
to load feeds from custom feeds directory, skipping...") + } + for _, we := range walkErrs { + logger.Warn().Err(we.Error).Str("path", we.Path).Msg("[THREAT INTEL] Issue encountered while loading custom feed, skipping...") } // get list of all feeds from the metadatabase @@ -148,19 +151,31 @@ func (server *ServerConn) syncThreatIntelFeedsFromConfig(afs afero.Fs, cfg *conf // download the feed feed, err = getOnlineFeed(server.GetContext(), entry.Path) if err != nil { - return err + // log the error as a warning and continue. do not return an error, as this should not stop the import process + logger.Warn().Err(err).Str("feed_url", entry.Path).Msg("[THREAT INTEL] Failed to download online feed, could not update feed in database...") + + //NOTE: should we remove the feed from the database if we can't download an updated version? + + //skip to next feed + continue } - // if feed has has an oudated last modified date, update as custom feed + // if file feed has has an oudated last modified date, update as custom feed case !entry.LastModifiedOnDisk.Equal(feeds[entry.Path].LastModified): logger.Info().Str("feed_path", entry.Path).Msg("[THREAT INTEL] Updating custom feed because it has been modified...") // open the feed file - feed, err = getCustomFeed(entry.Path) + feed, err = getCustomFeed(afs, entry.Path) if err != nil { - return err + // log the error as a warning and continue. do not return an error, as this should not stop the import process + logger.Warn().Err(err).Str("feed_path", entry.Path).Msg("[THREAT INTEL] Failed to open custom feed, could not update feed in database...") + + //NOTE: should we remove the feed from the database if we can't download an updated version? 
+ + // skip to next feed + continue } - // feed is up to date, skip ahead to next feed + // file feed is current, skip to next feed default: continue @@ -172,29 +187,33 @@ func (server *ServerConn) syncThreatIntelFeedsFromConfig(afs afero.Fs, cfg *conf } } + // iterate over each feed in the config that was not in the database for path := range feeds { entry := feeds[path] if !entry.Existing { var feed io.ReadCloser if entry.Online { + logger.Info().Str("feed_url", path).Msg("[THREAT INTEL] Adding new online feed...") // download the feed feed, err = getOnlineFeed(server.GetContext(), path) if err != nil { - return err + // log the error and skip adding the feed, but do not return an error, as this should not stop the import process + logger.Warn().Err(err).Str("feed_url", path).Msg("[THREAT INTEL] Failed to download online feed, skipping addition to database...") + // skip to next feed + continue } - logger.Info().Str("feed_url", path).Msg("[THREAT INTEL] Adding new online feed...") - } else { + logger.Info().Str("feed_path", path).Msg("[THREAT INTEL] Adding new custom feed...") // open the feed file - feed, err = getCustomFeed(path) + feed, err = getCustomFeed(afs, path) if err != nil { - return err + // log the error and skip adding the feed, but do not return an error, as this should not stop the import process + logger.Warn().Err(err).Str("feed_path", path).Msg("[THREAT INTEL] Failed to open custom feed, skipping addition to database...") + // skip to next feed + continue } - logger.Info().Str("feed_path", path).Msg("[THREAT INTEL] Adding new custom feed...") - } - // add the new feed to the database if err = server.addNewFeed(path, &entry, feed, writer.WriteChannel); err != nil { return err @@ -205,61 +224,75 @@ func (server *ServerConn) syncThreatIntelFeedsFromConfig(afs afero.Fs, cfg *conf return nil } -// fs := afero.NewOsFs() // getThreatIntelFeeds parses the threat intel sources from the config file into a feed map -func getThreatIntelFeeds(afs afero.Fs, 
cfg *config.Config) (map[string]threatIntelFeed, error) { +func getThreatIntelFeeds(afs afero.Fs, cfg *config.Config) (map[string]threatIntelFeed, []util.WalkError, error) { + // initialize feeds map feeds := make(map[string]threatIntelFeed) + // add custom feed sources - if err := getCustomFeedsList(afs, feeds, cfg.Env.ThreatIntelCustomFeedsDirectory); err != nil { - return nil, err - } + walkErrs, err := getCustomFeedsList(afs, feeds, cfg.Env.ThreatIntelCustomFeedsDirectory) // add online feed sources (with last modified time set to zero) getOnlineFeedsList(feeds, cfg.RITA.ThreatIntel.OnlineFeeds) - return feeds, nil + return feeds, walkErrs, err } // getCustomFeedsList populates the feeds map with the custom feed files contained in a specified directory // and their last modified times -func getCustomFeedsList(afs afero.Fs, feeds map[string]threatIntelFeed, dirPath string) error { - logger := zlog.GetLogger() - +func getCustomFeedsList(afs afero.Fs, feeds map[string]threatIntelFeed, dirPath string) ([]util.WalkError, error) { feedDir, err := util.ParseRelativePath(dirPath) if err != nil { - return err + return nil, err } - logger.Debug().Str("directory", feedDir).Msg("custom feed directory for threat intel") // check if directory is valid - err = util.ValidateDirectory(afs, feedDir) - if err != nil { - // return nil if the directory doesn't exist or contains no files - if errors.Is(err, util.ErrDirDoesNotExist) || errors.Is(err, util.ErrDirIsEmpty) { - return nil - } - return err + if err := util.ValidateDirectory(afs, feedDir); err != nil { + return nil, err } + var walkErrs []util.WalkError + // walk the directory and add each file to the feeds map - err = afero.Walk(afs, feedDir, func(path string, info os.FileInfo, err error) error { + if err := afero.Walk(afs, feedDir, func(path string, info os.FileInfo, err error) error { if err != nil { - return err + walkErrs = append(walkErrs, util.WalkError{ + Path: path, + Error: err, + }) } if !info.IsDir() { if 
filepath.Ext(path) == ".txt" { feeds[path] = threatIntelFeed{ LastModified: info.ModTime().UTC().Truncate(time.Second), } + } else { + // add to walk errors and continue + walkErrs = append(walkErrs, util.WalkError{ + Path: path, + Error: fmt.Errorf("invalid file extension for threat intel feed, must be .txt"), + }) } } return nil - }) - if err != nil { - return err + }); err != nil { + return walkErrs, err } - return nil + return walkErrs, nil +} + +// getCustomFeed opens the custom feed from the specified path and returns an io.ReadCloser +func getCustomFeed(afs afero.Fs, path string) (io.ReadCloser, error) { + if err := util.ValidateFile(afs, path); err != nil { + return nil, err + } + + file, err := afs.Open(path) + if err != nil { + return nil, err + } + return file, nil } // getOnlineFeedsList populates the feeds map with the passed in online feed sources (with last modified time set to zero) @@ -273,27 +306,27 @@ func getOnlineFeedsList(feeds map[string]threatIntelFeed, onlineFeedsList []stri // getOnlineFeed gets the feed at the specified URL and returns an io.ReadCloser func getOnlineFeed(ctx context.Context, url string) (io.ReadCloser, error) { - + // build request with context req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } + // execute request resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, err + return nil, fmt.Errorf("request failed: %w", err) } - return resp.Body, nil -} - -// getCustomFeed opens the custom feed from the specified path and returns an io.ReadCloser -func getCustomFeed(path string) (io.ReadCloser, error) { - file, err := os.Open(path) - if err != nil { - return nil, err + // fail if status code is not OK + // this is necessary for cases where the domain is valid but the resource is not found (will pass earlier err check) + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("request failed: %d (%s)", resp.StatusCode, 
resp.Status) } - return file, nil + + // return response body + return resp.Body, nil } func (server *ServerConn) updateFeed(entry *threatIntelFeedRecord, lastModified time.Time, feed io.ReadCloser, writeChan chan Data) error { @@ -421,6 +454,8 @@ func parseFeedEntries(feedHash util.FixedString, feed io.ReadCloser, writeChan c // send fqdn to writer feedEntry.FQDN = line writeChan <- feedEntry + } else { + // invalid entry, skip } } else { // send IP as IPv6 to writer diff --git a/database/threat_intel_test.go b/database/threat_intel_test.go index bf929f6..3add6b4 100644 --- a/database/threat_intel_test.go +++ b/database/threat_intel_test.go @@ -3,7 +3,10 @@ package database import ( "bufio" "context" + "fmt" "io" + "net/http" + "net/http/httptest" "regexp" "strconv" "strings" @@ -11,6 +14,7 @@ import ( "testing" "github.com/activecm/rita/v5/util" + "github.com/spf13/afero" "github.com/stretchr/testify/require" ) @@ -75,6 +79,7 @@ func TestParseOnlineFeeds(t *testing.T) { require.NoError(t, err, "parsing feed entries should not produce an error") // close channel and wait for go routine to finish + feed.Close() close(c) wg.Wait() @@ -120,4 +125,319 @@ func TestParseOnlineFeeds(t *testing.T) { require.Positive(t, total, "at least one fqdn should have been parsed") }) + + t.Run("Invalid Online Feed", func(t *testing.T) { + // create a channel to mimic the writer which would receive the parsed data + d := make(chan Data) + total := 0 + + // make a go routine to read from the channel and increment total + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for range d { + total++ + } + }() + + // attempt to get feed from non existent domain + url := "http://nonexistent.domain.abc12345/" + feed, err := getOnlineFeed(context.Background(), url) + require.Error(t, err, "getting online feed should produce an error") + require.Nil(t, feed, "feed should be nil") + + // attempt to get feed from existing domain but non existent resource + url = 
"http://example.com/nonexistentresource.txt" + feed, err = getOnlineFeed(context.Background(), url) + require.Error(t, err, "getting online feed should produce an error") + require.Nil(t, feed, "feed should be nil") + + // close channel and wait for go routine to finish + close(d) + wg.Wait() + + // make sure no entries were parsed + require.Zero(t, total, "no entries should have been parsed") + }) +} + +func TestGetOnlineFeed(t *testing.T) { + ctx := context.Background() + + type testCase struct { + name string + url string + setup func() string + expectErr []string + } + + tests := []testCase{ + { + name: "Valid Entry", + setup: func() string { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, "bing bong") + })) + t.Cleanup(srv.Close) + return srv.URL + }, + }, + { + name: "Non-Existent Domain", + url: "http://nonexistent.domain.abc12345/", + expectErr: []string{"request failed"}, + }, + { + name: "Non-Existent Resource On Existing Domain", + setup: func() string { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "chicken strip", http.StatusNotFound) + })) + t.Cleanup(srv.Close) + return srv.URL + }, + expectErr: []string{"404", "Not Found"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + // get url + url := tc.url + + // setup mock server if needed + if tc.setup != nil { + url = tc.setup() + } + + // get online feed + body, err := getOnlineFeed(ctx, url) + + // validate error case + if len(tc.expectErr) > 0 { + require.Error(t, err, "expected error but did not get one") + require.Nil(t, body, "body must be nil on error") + for _, msg := range tc.expectErr { + require.ErrorContains(t, err, msg, "error message does not contain expected text") + } + return + } + + // validate success case + require.NoError(t, err, "did not expect an error for this test case") + require.NotNil(t, 
body, "body should not be nil for successful fetch") + + data, readErr := io.ReadAll(body) + require.NoError(t, readErr) + require.NotEmpty(t, data, "successful response should contain data") + + body.Close() + }) + } +} +func TestGetCustomFeed(t *testing.T) { + + type testCase struct { + name string + path string + setup func(afero.Fs) string + expectErr error + } + + tests := []testCase{ + { + name: "Valid File", + setup: func(afs afero.Fs) string { + // tmp, err := os.CreateTemp("", "customfeed-*") + tmp, err := afero.TempFile(afs, "", "customfeed-*.txt") + require.NoError(t, err) + t.Cleanup(func() { afs.Remove(tmp.Name()) }) + + _, writeErr := tmp.WriteString("bing bong") + require.NoError(t, writeErr) + + require.NoError(t, tmp.Close()) + return tmp.Name() + }, + }, + { + name: "Non Existent File", + path: "/this/does/not/exist.txt", + expectErr: util.ErrFileDoesNotExist, + }, + { + name: "Path Is Directory", + setup: func(afs afero.Fs) string { + dir := "/somedir" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + return dir + }, + expectErr: util.ErrPathIsDir, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + afs := afero.NewMemMapFs() + // get path + path := tc.path + if tc.setup != nil { + path = tc.setup(afs) + } + + // get custom feed + body, err := getCustomFeed(afs, path) + + // validate error case + if tc.expectErr != nil { + require.Error(t, err, "expected error but did not get one") + require.Nil(t, body, "body must be nil on error") + require.ErrorContains(t, err, tc.expectErr.Error(), "error message does not contain expected text") + + return + } + + // validate success case + require.NoError(t, err, "did not expect an error for this test case") + require.NotNil(t, body, "body should not be nil for valid file") + + data, readErr := io.ReadAll(body) + require.NoError(t, readErr) + require.NotEmpty(t, data, "file should not be empty") + + body.Close() + }) + } +} + +func TestGetCustomFeedsList(t *testing.T) { + + type 
testCase struct { + name string + setup func(afs afero.Fs) string + dirPath string + expectErr error + expectedWalkErrors []string + expectFiles []string + } + + tests := []testCase{ + + { + name: "Valid Directory With TXT Files", + setup: func(afs afero.Fs) string { + dir := "/feeds" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + + require.NoError(t, afero.WriteFile(afs, "/feeds/a.txt", []byte("aaa"), 0o644)) + require.NoError(t, afero.WriteFile(afs, "/feeds/b.txt", []byte("bbb"), 0o644)) + return dir + }, + expectFiles: []string{"/feeds/a.txt", "/feeds/b.txt"}, + }, + { + name: "Valid Directory With TXT File and a Non-TXT File", + setup: func(afs afero.Fs) string { + dir := "/feeds" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + require.NoError(t, afero.WriteFile(afs, "/feeds/a.txt", []byte("aaa"), 0o644)) + require.NoError(t, afero.WriteFile(afs, "/feeds/b.txt", []byte("bbb"), 0o644)) + // not-txt file + require.NoError(t, afero.WriteFile(afs, "/feeds/image.png", []byte("png"), 0o644)) + return dir + }, + expectFiles: []string{"/feeds/a.txt", "/feeds/b.txt"}, + expectedWalkErrors: []string{"/feeds/image.png"}, + }, + { + name: "Directory Contains Only Non TXT Files", + setup: func(afs afero.Fs) string { + dir := "/nontxt" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + require.NoError(t, afero.WriteFile(afs, "/nontxt/a.json", []byte("{}"), 0o644)) + require.NoError(t, afero.WriteFile(afs, "/nontxt/b.csv", []byte("x,y"), 0o644)) + return dir + }, + expectedWalkErrors: []string{"/nontxt/a.json", "/nontxt/b.csv"}, + }, + { + name: "Directory Does Not Exist", + dirPath: "/missing", + expectErr: util.ErrDirDoesNotExist, + }, + { + name: "Directory Is Empty", + setup: func(afs afero.Fs) string { + dir := "/empty" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + return dir + }, + expectErr: util.ErrDirIsEmpty, + }, + { + name: "Path Is File Not Directory", + setup: func(afs afero.Fs) string { + dir := "/feeds" + require.NoError(t, afs.MkdirAll(dir, 0o755)) + 
filePath := "/feeds/file.txt" + require.NoError(t, afero.WriteFile(afs, filePath, []byte("data"), 0o644)) + return filePath + }, + expectErr: util.ErrPathIsNotDir, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + afs := afero.NewMemMapFs() + feeds := make(map[string]threatIntelFeed) + + // get directory path + dirPath := tc.dirPath + if tc.setup != nil { + dirPath = tc.setup(afs) + } + + // call function + walkErrs, err := getCustomFeedsList(afs, feeds, dirPath) + + if len(tc.expectedWalkErrors) > 0 || tc.expectErr != nil { + // validate error case + if tc.expectErr != nil { + require.Error(t, err, "expected error but did not get one") + require.ErrorContains(t, err, tc.expectErr.Error(), "error message does not contain expected text") + } + + // validate walk errors + if len(tc.expectedWalkErrors) > 0 { + require.Len(t, walkErrs, len(tc.expectedWalkErrors), "walk errors length mismatch") + for i, msg := range tc.expectedWalkErrors { + require.EqualValues(t, walkErrs[i].Path, msg, "walk error message does not contain expected text") + } + } + + return + } else { + // validate success case + require.NoError(t, err, "did not expect an error for this test case") + require.Empty(t, walkErrs, "did not expect any walk errors for this test case") + } + + // compare expected feeds + if tc.expectFiles == nil { + require.Empty(t, feeds, "feeds map should be empty") + } else { + require.Len(t, feeds, len(tc.expectFiles), "feeds map size mismatch") + + for _, f := range tc.expectFiles { + _, ok := feeds[f] + require.True(t, ok, "expected feed not found: %s", f) + } + } + }) + } } diff --git a/default_config.hjson b/default_config.hjson index 11b3556..d83a847 100644 --- a/default_config.hjson +++ b/default_config.hjson @@ -28,12 +28,16 @@ // always_included_subnets overrides the never_included_* and internal_subnets section, // making sure that any connection records containing addresses from these arrays are kept and not filtered // Note: the 
IP address of a proxy must be included here if the proxy is internal - "always_included_subnets": [], // array of CIDRs - "always_included_domains": [], // array of FQDNs + "always_included_subnets": [ + ], // array of CIDRs + "always_included_domains": [ + ], // array of FQDNs // connections involving ranges entered into never_included_subnets are filtered out at import time - "never_included_subnets": [], // array of CIDRs - "never_included_domains": [], // array of FQDNs + "never_included_subnets": [ + ], // array of CIDRs + "never_included_domains": [ + ], // array of FQDNs "filter_external_to_internal": true // ignores any entries where communication is occurring from an external host to an internal host }, "scoring": { diff --git a/installer/Installer.md b/installer/Installer.md index 3da43b5..2bf54a3 100644 --- a/installer/Installer.md +++ b/installer/Installer.md @@ -7,12 +7,11 @@ rita-.tar.gz │ install_rita.sh | install_zeek.yml | install_pre.yml -| install_post.yml │ └───/scripts │ │ ansible-installer.sh │ │ helper.sh -│ │ sshprep +│ │ sshprep.sh │ └───/files │ │ diff --git a/installer/build_image.sh b/installer/build_image.sh deleted file mode 100755 index 5936070..0000000 --- a/installer/build_image.sh +++ /dev/null @@ -1,5 +0,0 @@ -# local test script to build RITA as an amd64 Docker image and export it to a file -VERSION=$(git describe --always --abbrev=0 --tags) - -sudo docker buildx build --platform linux/amd64 --tag ghcr.io/activecm/rita:"$VERSION" ../ -docker save -o rita-"$VERSION"-image.tar ghcr.io/activecm/rita:"$VERSION" \ No newline at end of file diff --git a/installer/generate_installer.sh b/installer/generate_installer.sh index 51918ae..3fcf07d 100755 --- a/installer/generate_installer.sh +++ b/installer/generate_installer.sh @@ -1,111 +1,112 @@ #!/usr/bin/env bash -set -e +set -euo pipefail -# Generates the RITA installer by creating a temporary folder in the current directory named 'stage' +# This script generates the RITA installer by 
creating a temporary folder in the current directory named 'stage' # and copies files that must be in the installer into the stage folder. -# Once all directories are placed in stage, it is compressed and stage is deleted +# Once all directories are placed in stage, it is compressed and stage is deleted. ZEEK_VERSION=6.2.1 +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +RITA_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" + +# load helper functions +HELPER_FILE="$SCRIPT_DIR/install_scripts/helper.sh" +[[ -f "$HELPER_FILE" ]] || { echo "Helper functions script not found: $HELPER_FILE" >&2; exit 1; } +# shellcheck disable=SC1090 +source "$HELPER_FILE" + + # get RITA version from git -VERSION=$(git describe --always --abbrev=0 --tags) -echo "Generating installer for RITA $VERSION..." +if VERSION="$(git -C "$RITA_DIR" describe --tags --exact-match 2>/dev/null)"; then + : # release / ci +elif VERSION="$(git -C "$RITA_DIR" describe --tags --dirty --always 2>/dev/null)"; then + : # dev +else + fail "Unable to determine RITA_VERSION." +fi +[[ -n "$VERSION" ]] || { echo "Unable to determine RITA_VERSION." >&2; exit 1; } +status "Generating installer for RITA $VERSION..." 
# change working directory to directory of this script -pushd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" > /dev/null - -BASE_DIR="./rita-$VERSION-installer" # was ./stage/bin +# pushd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" > /dev/null -# create staging folder -rm -rf "$BASE_DIR" -# mkdir ./stage +# create staging directory +INSTALLER_DIR="${SCRIPT_DIR}/rita-$VERSION-installer" +OUTPUT_TARBALL="${SCRIPT_DIR}/rita-$VERSION.tar.gz" +remove_dir "$INSTALLER_DIR" +remove_file "$OUTPUT_TARBALL" +create_new_dir "$INSTALLER_DIR" # create ansible subfolders -SCRIPTS="$BASE_DIR/scripts" -ANSIBLE_FILES="$BASE_DIR/files" - -mkdir "$BASE_DIR" -mkdir -p "$ANSIBLE_FILES" -mkdir -p "$SCRIPTS" +SCRIPTS="$INSTALLER_DIR/scripts" +ANSIBLE_FILES="$INSTALLER_DIR/files" +create_new_dir "$SCRIPTS" +create_new_dir "$ANSIBLE_FILES" # create subfolders (for files that installed RITA will contain) INSTALL_OPT="$ANSIBLE_FILES"/opt INSTALL_ETC="$ANSIBLE_FILES"/etc -mkdir "$ANSIBLE_FILES"/opt -mkdir "$ANSIBLE_FILES"/etc - +create_new_dir "$INSTALL_OPT" +create_new_dir "$INSTALL_ETC" # copy files in base dir -cp ./install_scripts/install_zeek.yml "$BASE_DIR" -cp ./install_scripts/install_rita.yml "$BASE_DIR" -cp ./install_scripts/install_pre.yml "$BASE_DIR" -cp ./install_scripts/install_post.yml "$BASE_DIR" +copy_file "${SCRIPT_DIR}/install-rita-zeek-here.sh" "$INSTALLER_DIR" +copy_file "${SCRIPT_DIR}/install_scripts/install_zeek.yml" "$INSTALLER_DIR" +copy_file "${SCRIPT_DIR}/install_scripts/install_rita.yml" "$INSTALLER_DIR" +copy_file "${SCRIPT_DIR}/install_scripts/install_pre.yml" "$INSTALLER_DIR" -cp ./install_scripts/install_rita.sh "$BASE_DIR" # entrypoint +copy_file "${SCRIPT_DIR}/install_scripts/install_rita.sh" "$INSTALLER_DIR" # entrypoint # copy files to helper script folder -cp ./install_scripts/ansible-installer.sh "$SCRIPTS" -cp ./install_scripts/helper.sh "$SCRIPTS" -cp ./install_scripts/sshprep "$SCRIPTS" +copy_file 
"${SCRIPT_DIR}/install_scripts/ansible-installer.sh" "$SCRIPTS" +copy_file "${SCRIPT_DIR}/install_scripts/helper.sh" "$SCRIPTS" +copy_file "${SCRIPT_DIR}/install_scripts/sshprep.sh" "$SCRIPTS" # copy files to the ansible files folder -cp ./install_scripts/docker-compose "$ANSIBLE_FILES" # docker-compose v1 backwards compatibility script - +copy_file "${SCRIPT_DIR}/install_scripts/docker-compose" "$ANSIBLE_FILES" # docker-compose v1 backwards compatibility script # copy over configuration files to /files/etc -cp -r ../deployment/* "$INSTALL_ETC" -cp ../default_config.hjson "$INSTALL_ETC"/config.hjson +copy_dir_contents "${RITA_DIR}/deployment" "$INSTALL_ETC" +copy_file "${RITA_DIR}/default_config.hjson" "$INSTALL_ETC/config.hjson" # copy over installed files to /opt -cp ../rita.sh "$INSTALL_OPT"/rita.sh -curl --fail --silent --show-error -o "$INSTALL_OPT"/zeek https://raw.githubusercontent.com/activecm/docker-zeek/master/zeek -chmod +x "$INSTALL_OPT"/zeek -curl --fail --silent --show-error -o "$INSTALL_OPT"/zeek_log_transport.sh https://raw.githubusercontent.com/activecm/zeek-log-transport/refs/heads/master/zeek_log_transport.sh -chmod +x "$INSTALL_OPT"/zeek_log_transport.sh -cp ../.env.production "$INSTALL_OPT"/.env -cp ../docker-compose.prod.yml "$INSTALL_OPT"/docker-compose.yml -cp ../LICENSE "$INSTALL_OPT"/LICENSE -cp ../README.md "$INSTALL_OPT"/README - - -cp ./install-rita-zeek-here-tmp.sh install-rita-zeek-here.sh +copy_file "${RITA_DIR}/rita.sh" "$INSTALL_OPT" +curl --fail --silent --show-error -o "${INSTALL_OPT}/zeek" https://raw.githubusercontent.com/activecm/docker-zeek/master/zeek +chmod +x "${INSTALL_OPT}/zeek" +curl --fail --silent --show-error -o "${INSTALL_OPT}/zeek_log_transport.sh" https://raw.githubusercontent.com/activecm/zeek-log-transport/refs/heads/master/zeek_log_transport.sh +chmod +x "${INSTALL_OPT}/zeek_log_transport.sh" +copy_file "${RITA_DIR}/.env.production" "${INSTALL_OPT}/.env" +copy_file "${RITA_DIR}/docker-compose.prod.yml" 
"${INSTALL_OPT}/docker-compose.yml" +copy_file "${RITA_DIR}/LICENSE" "${INSTALL_OPT}/LICENSE" +copy_file "${RITA_DIR}/README.md" "${INSTALL_OPT}/README" # update version variables for files that need them -if [ "$(uname)" == "Darwin" ]; then - sed -i'.bak' "s/RITA_REPLACE_ME/${VERSION}/g" "install-rita-zeek-here.sh" - sed -i'.bak' "s/REPLACE_ME/${VERSION}/g" "$BASE_DIR/install_rita.yml" - sed -i'.bak' "s/REPLACE_ME/${ZEEK_VERSION}/g" "$BASE_DIR/install_zeek.yml" - sed -i'.bak' "s/REPLACE_ME/${VERSION}/g" "$BASE_DIR/install_rita.sh" - sed -i'.bak' "s#ghcr.io/activecm/rita:latest#ghcr.io/activecm/rita:${VERSION}#g" "$INSTALL_OPT/docker-compose.yml" +if [[ "$(uname)" == "Darwin" ]]; then + sed -i'.bak' "s/RITA_REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install-rita-zeek-here.sh" + sed -i'.bak' "s/REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install_rita.yml" + sed -i'.bak' "s/REPLACE_ME/${ZEEK_VERSION}/g" "${INSTALLER_DIR}/install_zeek.yml" + sed -i'.bak' "s/REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install_rita.sh" + sed -i'.bak' "s#ghcr.io/activecm/rita:latest#ghcr.io/activecm/rita:${VERSION}#g" "${INSTALL_OPT}/docker-compose.yml" - rm "install-rita-zeek-here.sh.bak" - rm "$BASE_DIR/install_rita.yml.bak" - rm "$BASE_DIR/install_zeek.yml.bak" - rm "$BASE_DIR/install_rita.sh.bak" - rm "$INSTALL_OPT/docker-compose.yml.bak" + remove_file "${INSTALLER_DIR}/install-rita-zeek-here.sh.bak" + remove_file "${INSTALLER_DIR}/install_rita.yml.bak" + remove_file "${INSTALLER_DIR}/install_zeek.yml.bak" + remove_file "${INSTALLER_DIR}/install_rita.sh.bak" + remove_file "${INSTALL_OPT}/docker-compose.yml.bak" else - sed -i "s/RITA_REPLACE_ME/${VERSION}/g" ./install-rita-zeek-here.sh - sed -i "s/REPLACE_ME/${VERSION}/g" "$BASE_DIR/install_rita.yml" - sed -i "s/REPLACE_ME/${ZEEK_VERSION}/g" "$BASE_DIR/install_zeek.yml" - sed -i "s/REPLACE_ME/${VERSION}/g" "$BASE_DIR/install_rita.sh" - sed -i "s#ghcr.io/activecm/rita:latest#ghcr.io/activecm/rita:${VERSION}#g" 
"$INSTALL_OPT/docker-compose.yml" + sed -i "s/RITA_REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install-rita-zeek-here.sh" + sed -i "s/REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install_rita.yml" + sed -i "s/REPLACE_ME/${ZEEK_VERSION}/g" "${INSTALLER_DIR}/install_zeek.yml" + sed -i "s/REPLACE_ME/${VERSION}/g" "${INSTALLER_DIR}/install_rita.sh" + sed -i "s#ghcr.io/activecm/rita:latest#ghcr.io/activecm/rita:${VERSION}#g" "${INSTALL_OPT}/docker-compose.yml" fi - - - - -# ./build_image.sh - - -# create tar -tar -czf "rita-$VERSION.tar.gz" "$BASE_DIR" +# create tarball from staging folder +tar -czf "$OUTPUT_TARBALL" -C "$SCRIPT_DIR" "$(basename "$INSTALLER_DIR")" # delete staging folder -rm -rf "$BASE_DIR" - -# switch back to original working directory -popd > /dev/null +remove_dir "$INSTALLER_DIR" -echo "Finished generating installer." +status "Finished generating installer." \ No newline at end of file diff --git a/installer/install-rita-zeek-here-tmp.sh b/installer/install-rita-zeek-here.sh similarity index 57% rename from installer/install-rita-zeek-here-tmp.sh rename to installer/install-rita-zeek-here.sh index 4a01bde..1cf6d98 100644 --- a/installer/install-rita-zeek-here-tmp.sh +++ b/installer/install-rita-zeek-here.sh @@ -1,23 +1,18 @@ -#!/bin/bash -#This installs docker, rita, and zeek on the current system. 
-#V0.1.4 +#!/usr/bin/env bash +set -euo pipefail -#Run one of the following 3 command lines: -# curl -A Mozilla -fsSL https://github.com/activecm/rita/releases/latest/download/install-rita-zeek-here.sh | sudo bash - -# wget -U Mozilla -q -O - https://github.com/activecm/rita/releases/latest/download/install-rita-zeek-here.sh | sudo bash - -#or download the above file and run: -# sudo bash install-rita-zeek-here.sh +# This script downloads and installs rita, zeek, and all dependencies on the current system export RITA_VERSION="RITA_REPLACE_ME" export zeek_release='latest' export PATH="$PATH:/usr/local/bin/" echo 'export PATH=$PATH:/usr/local/bin/' | sudo tee -a /etc/profile.d/localpath.sh -if [ "$EUID" -ne 0 ]; then - Sudo="/usr/bin/sudo " +if [[ "$EUID" -ne 0 ]]; then + SUDO="/usr/bin/sudo " fi -$Sudo mkdir -p /usr/local/bin/ +$SUDO mkdir -p /usr/local/bin/ echo "==== Installing rita $RITA_VERSION ====" >&2 cd @@ -28,8 +23,8 @@ cd rita-${RITA_VERSION}-installer rita help &2 -$Sudo wget -O /usr/local/bin/zeek https://raw.githubusercontent.com/activecm/docker-zeek/master/zeek -$Sudo chmod +x /usr/local/bin/zeek +$SUDO wget -O /usr/local/bin/zeek https://raw.githubusercontent.com/activecm/docker-zeek/master/zeek +$SUDO chmod +x /usr/local/bin/zeek /usr/local/bin/zeek pull /dev/null +#!/usr/bin/env bash +set -euo pipefail +# Ansible Install Script +# This script installs Ansible on the current system using pipx. -source ./helper.sh +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" -#This script installs ansible and supporting tools needed for rita -#and/or AC-Hunter on a deb, rpm, port, or brew package -based system. -#It also patches all installed packages. +# cd to the directory where this script is located +pushd "$SCRIPT_DIR" > /dev/null -#The general aim is that this will work on multiple Linux distributions -#that use either .deb or .rpm packages, though more testing is needed. 
-#Please contact bill@activecountermeasures.com if you have any updates -#on errors or compatibility issues found. Many thanks to NG for -#the original idea and multiple improvements. +# load helper functions +HELPER_FILE="$SCRIPT_DIR/helper.sh" +[[ -f "$HELPER_FILE" ]] || { echo "Helper functions script not found: $HELPER_FILE" >&2; exit 1; } +# shellcheck disable=SC1090 +source "$HELPER_FILE" +# verify that user has sudo privileges +require_sudo -ansible_installer_version="0.3.7" - -#Uncomment one of the following lines to set the default program to download and install -data_needed="rita" - - - - -require_sudo() { - #Stops the script if the user does not have root priviledges and cannot sudo - #Additionally, sets $SUDO to "sudo" and $SUDO_E to "sudo -E" if needed. - - status "Checking sudo; if asked for a password this will be your user password on the machine running the installer." #================ - if [ "$EUID" -eq 0 ]; then - SUDO="" - SUDO_E="" - return 0 - elif sudo -v; then #Confirms I'm allowed to run commands via sudo - SUDO="sudo" - SUDO_E="sudo -E" - return 0 - else - #I'm _not_ allowed to run commands as sudo. - echo "It does not appear that user $USER has permission to run commands under sudo." >&2 - if grep -q '^wheel:' /etc/group ; then - fail "Please run usermod -aG wheel $USER as root, log out, log back in, and retry the install" - elif grep -q '^sudo:' /etc/group ; then - fail "Please run usermod -aG sudo $USER as root, log out, log back in, and retry the install" - else - fail "Please give this user the ability to run commands as root under sudo, log out, log back in, and retry the install" - fi - fi -} - - -tmp_dir() { - mkdir -p "$HOME/tmp/" - tdirname=`mktemp -d -q "$HOME/tmp/install-tools.XXXXXXXX" &2 - sleep 60 - done - fi - while ! $SUDO apt-get -q -y update >/dev/null ; do - echo "Error updating package metadata, perhaps because a system update is running; will wait 60 seconds and try again." >&2 - sleep 60 - done - while ! 
$SUDO apt-get -q -y upgrade >/dev/null ; do - echo "Error updating packages, perhaps because a system update is running; will wait 60 seconds and try again." >&2 - sleep 60 - done - while ! $SUDO apt-get -q -y install lsb-release >/dev/null ; do - echo "Error installing lsb-release, perhaps because a system update is running; will wait 60 seconds and try again." >&2 - sleep 60 - done - elif [ -x /usr/bin/yum -a -x /bin/rpm ]; then - $SUDO yum -q -e 0 makecache - $SUDO yum -q -e 0 -y update - $SUDO yum -y -q -e 0 -y install yum-utils - $SUDO yum -y -q -e 0 -y install redhat-lsb-core >/dev/null 2>/dev/null || /bin/true #If available, we install it. If not, we ignore the error and continue on. - if [ -s /etc/redhat-release -a -s /etc/os-release ]; then - . /etc/os-release - if [ "$VERSION_ID" = "7" ]; then - $SUDO yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - if [ ! -e /etc/centos-release ]; then - $SUDO yum -y install subscription-manager - $SUDO subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - fi - elif [ "$VERSION_ID" = "8" ]; then - $SUDO yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - if [ -e /etc/centos-release ]; then - $SUDO dnf config-manager --set-enabled powertools - else - $SUDO yum -y install subscription-manager - $SUDO subscription-manager repos --enable "codeready-builder-for-rhel-8-`/bin/arch`-rpms" - fi - fi - fi - $SUDO yum -q -e 0 makecache - fi -} - +# Install a required executable +# This function attempts to install a system package only if the corresponding binary does +# not already exist in PATH. This is only intended for tools that provide a real executable +# $1 = binary name to check for (e.g., "python3", "pip3", "curl") +# $2 = space-separated list of package names that provide the binary (preferred packages first) install_tool() { - #Install a program. 
$1 holds the name of the executable we need - #$2 is one or more packages that can supply that executable (put preferred package names early in the list). - - - binary="$1" - potential_packages="$2" + binary="$1" + potential_packages="$2" + + # if the binary already exists, nothing to do + if type -path "$binary" >/dev/null 2>&1; then + status "== $binary executable is already installed" + return 0 + fi + + status "== Installing package that contains $binary" + + # Ubuntu + if command -v apt-get >/dev/null 2>&1; then + for pkg in $potential_packages; do + if ! type -path "$binary" >/dev/null 2>&1; then + $SUDO apt-get -q -y install "$pkg" + fi + done + + # RHEL / CentOS / Rocky / Alma + elif command -v yum >/dev/null 2>&1; then + for pkg in $potential_packages; do + if ! type -path "$binary" >/dev/null 2>&1; then + $SUDO yum -y -q -e 0 install "$pkg" + fi + done + + else + fail "Unable to install packages: unsupported package manager" + fi + + # final verification + if type -path "$binary" >/dev/null 2>&1; then + return 0 + else + echo "WARNING: Unable to install $binary from system package" >&2 + return 1 + fi +} - if type -path "$binary" >/dev/null ; then - status "== $binary executable is installed." #================ - else - status "== Installing package that contains $binary" #================ - if [ -x /usr/bin/apt-get -a -x /usr/bin/dpkg-query ]; then - for one_package in $potential_packages ; do - if ! type -path "$binary" >/dev/null ; then #if a previous package was successfully able to install, don't try again. - $SUDO apt-get -q -y install $one_package - fi - done - elif [ -x /usr/bin/yum -a -x /bin/rpm ]; then - #Yum takes care of the lock loop for us - for one_package in $potential_packages ; do - if ! type -path "$binary" >/dev/null ; then #if a previous package was successfully able to install, don't try again. - $SUDO yum -y -q -e 0 install $one_package - fi - done +install_ansible() { + # Make sure venv support actually works on this system. 
+ if ! python3 -m venv --help >/dev/null 2>&1; then + fail "Python venv support is missing on this system. Cannot continue." + fi + + # Bootstrap a local virtualenv whose only job is to host pipx, + # so we never touch system Python even on PEP 668 distros. + python3 -m venv .ansenv || fail "Unable to create Python virtual environment" + + # shellcheck disable=SC1091 + source .ansenv/bin/activate + + # Make sure pip in the venv is up to date before installing pipx + python3 -m pip install --upgrade pip || fail "Unable to upgrade pip in virtual environment" + + # install pipx into this bootstrap venv + python3 -m pip install pipx || fail "Unable to install pipx in virtual environment" + + # ask pipx to ensure ~/.local/bin is added to future shells' PATH + pipx ensurepath --prepend || true + + # install a specific, pinned ansible-core via pipx. + # pipx will create/own its own venv under ~/.local/pipx/venvs/ansible-core + # and expose the entrypoints (ansible, ansible-playbook, ansible-galaxy) + # under ~/.local/bin/. + pipx install "ansible-core==2.15.13" --force || fail "Unable to install ansible-core with pipx" + + deactivate + + # After deactivating, PATH is restored to its previous value, so any edits we + # made inside the venv are lost. Re-ensure ~/.local/bin is on PATH for the rest + # of this script so ansible/ansible-playbook/ansible-galaxy are visible. + case ":$PATH:" in + *":$HOME/.local/bin:"*) ;; + *) export PATH="$HOME/.local/bin:$PATH" ;; + esac + + # Sanity check: make sure the expected Ansible CLIs are now visible + for bin in ansible ansible-playbook ansible-galaxy; do + if ! command -v "$bin" >/dev/null 2>&1; then + fail "$bin not found in PATH after pipx installation" + fi + done + + # Link Ansible binaries globally so they work for root, users, cron, and systemd + status "Linking Ansible binaries globally..." 
+ + for bin in ansible ansible-playbook ansible-galaxy; do + SRC="$(command -v "$bin" || true)" + if [ -n "$SRC" ]; then + $SUDO ln -sf "$SRC" "/usr/local/bin/$bin" else - fail "Neither (apt-get and dpkg-query) nor (yum, rpm, and yum-config-manager) is installed on the system" + fail "Unable to locate $bin for global linking" fi - fi + done - if type -path "$binary" >/dev/null ; then - return 0 - else - echo "WARNING: Unable to install $binary from a system package" >&2 - return 1 - fi + # install requisite ansible collections + status "Installing required Ansible collections..." + ansible-galaxy collection install community.general community.docker --force } -install_ansible() { - # install pipx package in a Python virtual environment (PEP 668 mitigation) - python3 -m venv .ansenv - source .ansenv/bin/activate - - python3 -m pip install pipx - - pipx ensurepath --prepend - # install ansible and ansible-core via pipx - pipx install ansible ansible-core==2.15.13 --force - deactivate - - # prepend ~/.local/bin to path if not present - [[ ":$PATH:" != *":$HOME/.local/bin:"* ]] && PATH="$HOME/.local/bin:${PATH}" -} - -echo "ansible_installer version $ansible_installer_version" >&2 - -#FIXME We no longer need these choices, remove the following block -#if [ -n "$1" ]; then -# if [ "$1" = "rita" ]; then -# data_needed="rita" -# shift -# elif [ "$1" = "achunter" ]; then -# data_needed="achunter" -# shift -# else -# install_target="$1" -# shift -# fi -#fi -#if [ -n "$1" ]; then -# install_target="$1" -#fi -# -#if [ -z "$install_target" ]; then -# install_target="localhost" -#fi +# ======== main script starts here ======== +# require sudo privileges require_sudo -# check if macOS -if [ "$(uname)" == "Darwin" ]; then +# check if macOS, and install ansible via brew if so +if [[ "$(uname)" == "Darwin" ]]; then # check if ansible is installed which -s ansible if [[ $? 
!= 0 ]] ; then @@ -278,58 +189,79 @@ if [ "$(uname)" == "Darwin" ]; then else echo "== Ansible is already installed." fi - # FIXME - # exit to avoid fubaring mac - # fail "bingbong" -else - patch_system - +else # assume linux + # enable necessary repositories enable_repositories + status "Installing required tools..." - status "Installing needed tools" #================ + # install python dependencies install_tool python3 "python3" - install_tool pip3 "python3-pip" #Note, oracle linux does not come with pip at all. The "python3-pip-wheel" package does not include pip. - install_tool venv "python3-venv" - python3 -m pip -V ; retcode="$?" - if [ "$retcode" != 0 ]; then - fail "Unable to run python3's pip, exiting." + install_tool pip3 "python3-pip" + + # ensure python venv support is available - cannot use install_tool for this since + # venv is a module, not a binary + # Ensure `python3 -m venv` actually works on Debian/Ubuntu. + # On these systems the stdlib venv module requires the extra + # `python3-venv` package (which provides ensurepip). + # This is safe to run even if it's already installed: `apt-get install` + # is idempotent and will return success in that case. + if command -v apt-get >/dev/null 2>&1; then + status "Ensuring python3-venv is installed for virtualenv support" + $SUDO apt-get -q -y install python3-venv fi + # sanity check after install + if ! python3 -m venv --help >/dev/null 2>&1; then + fail "python3 venv module is still not available after installation" + fi + + # verify that pip is functional + if ! python3 -m pip -V >/dev/null 2>&1; then + fail "Unable to run python3's pip" + fi + # install other dependencies install_tool wget "wget" install_tool curl "curl" - install_tool sha256sum "coreutils" + # install ansible install_ansible fi +# ensure /usr/local/bin is in PATH +status "Ensuring /usr/local/bin is in PATH..." +if ! 
printf '%s\n' "$PATH" | grep -qE '(^|:)/usr/local/bin(:|$)'; then + echo "Adding /usr/local/bin to PATH" >&2 + + # For current session + export PATH="$PATH:/usr/local/bin" + + # For future logins (prefer system-wide drop-in if available) + if [ -d /etc/profile.d ]; then + echo 'export PATH="$PATH:/usr/local/bin"' | $SUDO tee /etc/profile.d/local-bin-path.sh >/dev/null + elif [ -s /etc/profile ]; then + echo 'export PATH="$PATH:/usr/local/bin"' | $SUDO tee -a /etc/profile >/dev/null + else + echo "Warning: Unable to persist /usr/local/bin in PATH" >&2 + fi +fi -#We need to install zeek through the rita installer in order to install both -#install_tool zeek "zeek" -#install_tool zeekctl "zeekctl" - - - - +# switch back to original working directory +popd > /dev/null -status "Preparing this system" #================ -#Try to add /usr/local/bin/ to path (though the better way is to log out and log back in) -if ! echo "$PATH" | grep -q '/usr/local/bin' ; then - echo "Adding /usr/local/bin to path" >&2 - #For this login only... - export PATH="$PATH:/usr/local/bin/" - #...and for future logins - if [ -s /etc/environment ]; then - echo 'export PATH="$PATH:/usr/local/bin/"' | $SUDO tee -a /etc/environment >/dev/null - elif [ -s /etc/profile ]; then - echo 'export PATH="$PATH:/usr/local/bin/"' | $SUDO tee -a /etc/profile >/dev/null - else - echo "Unable to add /usr/local/bin/ to path." >&2 - fi +status "Final verification..." +# verify the binary is resolvable +if ! command -v ansible-playbook >/dev/null 2>&1; then + fail "ansible-playbook is not in PATH after installation" +fi +# verify ansible-playbook executes +if ! ansible-playbook --version >/dev/null 2>&1; then + fail "ansible-playbook is present but failed to execute" +fi +# verify ansible-galaxy executes +if ! 
ansible-galaxy --version >/dev/null 2>&1; then + fail "ansible-galaxy is present but failed to execute" fi -# install requisite ansible collections -ansible-galaxy collection install community.general community.docker --force - -popd > /dev/null +status "Ansible installation complete" \ No newline at end of file diff --git a/installer/install_scripts/helper.sh b/installer/install_scripts/helper.sh index e66a1b6..54ca328 100755 --- a/installer/install_scripts/helper.sh +++ b/installer/install_scripts/helper.sh @@ -1,23 +1,189 @@ -#!/bin/bash +#!/usr/bin/env bash +# helper.sh must be sourced, not executed +if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then + echo "This script must be sourced, not executed." >&2 + exit 1 +fi -RED=$(tput setaf 1) -YELLOW=$(tput setaf 3) -NORMAL=$(tput sgr0) +RED="" +YELLOW="" +BLUE="" +GREEN="" +NORMAL="" +# SUDO and SUDO_E are intentionally initialized to empty here. +# helper functions will never use sudo unless a script explicitly opts in +# by calling require_sudo(). This prevents accidental privilege escalation in scripts that source +# helper.sh but are not intended to run as root. +SUDO="" +SUDO_E="" +export SUDO SUDO_E -fail() { - #Something failed, exit. 
+# enable verbose output by default
+verbose="yes"
+
+# use colors if terminal supports it
+if [[ -t 1 ]] && command -v tput >/dev/null 2>&1; then
+    RED=$(tput setaf 1)
+    YELLOW=$(tput setaf 3)
+    BLUE=$(tput setaf 4)
+    GREEN=$(tput setaf 2)
+    NORMAL=$(tput sgr0)
+fi
-	echo "${RED}$@, exiting.${NORMAL}" >&2
+# something failed, exit
+fail() {
+    echo "${RED}$*, exiting.${NORMAL}" >&2
 	exit 1
 }
-
+# print status message if verbose is enabled
 status() {
-	if [ "$verbose" = 'yes' ]; then
-		echo "== $@" >&2
+    if [[ "${verbose:-}" == "yes" ]]; then
+        echo "== $*" >&2
 	fi
 }
-verbose="yes"
+# ensure script is run with sudo privileges
+require_sudo() {
+    # check if running as root
+    if [[ "$EUID" -eq 0 ]]; then
+        SUDO=""
+        SUDO_E=""
+        export SUDO SUDO_E
+        return 0
+    fi
+
+    # check that we are able to run commands with sudo (non-interactive)
+    if sudo -v </dev/null; then
+        SUDO="sudo"
+        SUDO_E="sudo -E"
+        export SUDO SUDO_E
+        return 0
+    fi
+
+    fail "Missing administrator privileges. Please run with an account that has sudo privileges."
+} + +# require that a command exists +require_cmd() { + command -v "$1" >/dev/null 2>&1 || fail "Required command not found: $1" +} + +# require that a file exists +require_file() { + [[ -f "$1" ]] || { fail "Required file not found: $1"; } +} + +# require that a directory exists +require_dir() { + [[ -d "$1" ]] || { fail "Required directory not found: $1"; } +} + +# require that an environment variable is set and non-empty +require_env() { + [[ -n "${!1:-}" ]] || fail "Required environment variable not set or empty: $1" +} + +# copy a file, ensuring the source exists and the destination parent directory exists +copy_file() { + local src="$1" + local dst="$2" + + require_file "$src" + + # if user provided a directory as destination, copy into that directory + if [[ -d "$dst" ]]; then + cp -- "$src" "$dst" || fail "Failed to copy file from $src to $dst" + require_file "$dst/$(basename "$src")" + else + # if user provided a full destination path (including filename), ensure parent directory exists + require_dir "$(dirname "$dst")" + cp -- "$src" "$dst" || fail "Failed to copy file from $src to $dst" + require_file "$dst" + fi +} + + +# copy a directory recursively, ensuring the source exists and the destination parent directory exists +copy_dir() { + require_dir "$1" + require_dir "$2" + cp -r -- "$1" "$2" || fail "Failed to copy directory from $1 to $2" +} + +# this version does not copy the dir itself, nor any dotfiles inside it +copy_dir_contents() { + require_nonempty_dir "$1" + require_dir "$2" + cp -r -- "$1"/* "$2" || fail "Failed to copy contents from $1 to $2" +} + +create_new_dir() { + if [[ -e "$1" ]]; then + fail "Failed to create new directory, path already exists: $1" + fi + mkdir -p -- "$1" || fail "Failed to create directory: $1" + require_dir "$1" +} + +# check if a directory exists and is empty +dir_is_empty() { + [[ -d "$1" ]] || return 1 + [[ -z "$(ls -A "$1" 2>/dev/null)" ]] +} + +# ensure a directory exists; create it if missing +ensure_dir() { 
+ [[ -d "$1" ]] && return 0 + create_new_dir "$1" +} + +# ensure a directory exists (create if not) and is empty +ensure_empty_dir() { + ensure_dir "$1" + if ! dir_is_empty "$1"; then + clear_dir "$1" + fi +} + +# require that a directory has at least one non-dot entry (file or dir) +require_nonempty_dir() { + require_dir "$1" + if ! find "$1" -mindepth 1 -maxdepth 1 ! -name '.*' -print -quit | grep -q .; then + fail "Directory is empty: $1" + fi +} + +# remove a file (or symlink) if it exists +remove_file() { + local path="$1" + if [[ -e "$path" || -L "$path" ]]; then + rm -f -- "$path" || fail "Failed to remove file: $path" + fi +} + +# remove a directory if it exists +remove_dir() { + if [[ -d "$1" ]]; then + rm -rf -- "$1" || fail "Failed to remove directory: $1" + elif [[ -e "$1" || -L "$1" ]]; then + fail "Expected directory but found non-directory: $1" + fi +} + +# delete everything inside a directory, but keep the directory itself +clear_dir() { + [[ -n "${1:-}" ]] || fail "clear_dir: missing dir" + local dir="$1" + + if [[ -e "$dir" || -L "$dir" ]]; then + [[ -d "$dir" ]] || fail "Expected directory but found non-directory: $dir" + else + ensure_dir "$dir" + fi + + # Delete contents (including dotfiles) but not the directory itself. + find -- "$dir" -mindepth 1 -maxdepth 1 -exec rm -rf -- {} + || fail "Failed to clear directory: $dir" +} \ No newline at end of file diff --git a/installer/install_scripts/install_post.yml b/installer/install_scripts/install_post.yml deleted file mode 100644 index a09adc7..0000000 --- a/installer/install_scripts/install_post.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -# ansible install playbook for rita V2. -# Version: 202408061413 -# sample runs: -# Optional: Add the following block, without #'s to /etc/ansible/hosts (or /opt/local/etc/ansible/hosts if using ansible on mac with mac ports). -# The hosts must each be on their own line. These can be full or short hostnames or a name following "Host" in ~/.ssh/config. 
-# -# [allritas] -# ro810 -# ub2404 -# -# Then run this, with a comma separated list of hostnames from the above file with a comma at the end of the list: -# -# ansible-playbook -C -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' #-C (no changes) means do a dry run -# ansible-playbook -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' - -# Many thanks to but-i-am-dominator for his help with this playbook. - -- name: "RITA Post: Reboot checks." - hosts: "{{ install_hosts }}" - become: true - - vars: - ansible_python_interpreter: /bin/python3 # Centos 7 defaults to using python2, so we force python 3. This change does not break any other distros - - #Late tasks, including rebooting - post_tasks: - - name: "RITA Post: Check if reboot required on rpm-based systems." - command: needs-restarting -r - register: reboot_result - ignore_errors: True - failed_when: reboot_result.rc is not defined - when: ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' - tags: - - packages - - linux - - linuxrpm - - - name: "RITA Post: Check if reboot required on deb-based systems." - register: reboot_required_file - stat: - path: /var/run/reboot-required - get_checksum: no - ignore_errors: True - when: ansible_distribution == 'Debian' or ansible_distribution == 'Kali' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Zorin OS' - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Post: Rebooting system if needed." 
- reboot: - reboot_timeout: 120 - when: - - ansible_connection != 'local' - - (reboot_required_file.stat is defined and reboot_required_file.stat.exists) or (reboot_result.rc is defined and reboot_result.rc == 1) - register: reboot_status - ignore_errors: True #If unable to reboot (as ansible refuses to do if installing to localhost) we leave the error at the end of the output but don't treat it as a failure. - tags: - - packages - - linux - - linuxdeb - - linuxrpm diff --git a/installer/install_scripts/install_pre.yml b/installer/install_scripts/install_pre.yml index 4212540..b7a0b7e 100644 --- a/installer/install_scripts/install_pre.yml +++ b/installer/install_scripts/install_pre.yml @@ -1,293 +1,73 @@ ---- -# ansible install playbook that does the prep work for rita V2. -# Version: 202408061413 -# sample runs: -# Optional: Add the following block, without #'s to /etc/ansible/hosts (or /opt/local/etc/ansible/hosts if using ansible on mac with mac ports). -# The hosts must each be on their own line. These can be full or short hostnames or a name following "Host" in ~/.ssh/config. -# -# [allritas] -# ro810 -# ub2404 -# -# Then run this, with a comma separated list of hostnames from the above file with a comma at the end of the list: -# -# ansible-playbook -C -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' #-C (no changes) means do a dry run -# ansible-playbook -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' +# ansible playbook that performs pre-installation tasks for RITA -# Many thanks to but-i-am-dominator for his help with this playbook. - -- name: "RITA Pre: System prep and checks." +- name: "RITA Pre: System prep and checks" hosts: "{{ install_hosts }}" become: true - vars: - ansible_python_interpreter: /bin/python3 # Centos 7 defaults to using python2, so we force python 3. 
This change does not break any other distros + ansible_python_interpreter: /bin/python3 - #Early tasks needed to support the rest of the install + # early tasks for checking system compatibility pre_tasks: - #Known distribution? - - name: "RITA Pre: Checking Linux distribution." + - name: "RITA Pre: Checking Linux distribution" ansible.builtin.fail: - msg: "Distribution name: {{ ansible_distribution }} does not appear to be recognized - please contact ACM" - when: ( ansible_distribution != 'AlmaLinux' and ansible_distribution != 'CentOS' and ansible_distribution != 'Fedora' and ansible_distribution != 'OracleLinux' and ansible_distribution != 'Pop!_OS' and ansible_distribution != 'Rocky' and ansible_distribution != 'Debian' and ansible_distribution != 'Ubuntu' and ansible_distribution != 'Kali' and ansible_distribution != 'Zorin OS' and ansible_distribution != 'RedHat' ) + msg: "Distribution {{ ansible_distribution }} is not supported, please see the documentation for supported distributions." + when: + - ansible_distribution != 'CentOS' + - ansible_distribution != 'Rocky' + - ansible_distribution != 'Ubuntu' + - ansible_distribution != 'RedHat' + - ansible_distribution != 'AlmaLinux' #NOTE: legacy support for AlmaLinux 8 only tags: - linux - - name: "RITA Pre: Checking Linux distribution version." 
+ - name: "RITA Pre: Checking Linux distribution version" ansible.builtin.fail: - msg: "Warning: Linux distribution {{ ansible_distribution }} {{ ansible_distribution_major_version }} may not have been tested - please contact ACM and report whether the install worked or not" - when: ( ( ansible_distribution == 'AlmaLinux' and (ansible_distribution_major_version != '9') ) or ( ansible_distribution == 'CentOS' and (ansible_distribution_major_version != '7' and ansible_distribution_major_version != '9') ) or ( ansible_distribution == 'Fedora' and (ansible_distribution_major_version != '40') ) or ( ansible_distribution == 'OracleLinux' and (ansible_distribution_major_version != '9') ) or ( ansible_distribution == 'Pop!_OS' and (ansible_distribution_major_version != '22') ) or ( ansible_distribution == 'Rocky' and (ansible_distribution_major_version != '8' and ansible_distribution_major_version != '9') ) or ( ansible_distribution == 'Debian' and (ansible_distribution_major_version != '12') ) or ( ansible_distribution == 'Kali' and (ansible_distribution_major_version != '2024') ) or ( ansible_distribution == 'Ubuntu' and (ansible_distribution_major_version != '20' and ansible_distribution_major_version != '22' and ansible_distribution_major_version != '24') ) or ( ansible_distribution == 'Zorin OS' and (ansible_distribution_major_version != '16') ) or ( ansible_distribution == 'RedHat' and (ansible_distribution_major_version != '9') ) ) - ignore_errors: True #We print a warning but do not abort if this is an unknown combination of distribution and major version. + msg: "Distribution {{ ansible_distribution }} {{ ansible_distribution_major_version }} is not supported." 
+ when: + - > + (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version not in ['22', '24']) or + (ansible_distribution == 'CentOS' and ansible_distribution_major_version != '9') or + (ansible_distribution == 'RedHat' and ansible_distribution_major_version not in ['8', '9']) or + (ansible_distribution == 'AlmaLinux' and ansible_distribution_major_version != '8') tags: - linux - #CPU Architecture - - name: "RITA Pre: Check system architecture." + - name: "RITA Pre: Check CPU architecture" ansible.builtin.fail: - msg: "Unsupported CPU architecture: {{ ansible_architecture }}" + msg: "CPU architecture {{ ansible_architecture }} is not supported." when: ( ansible_architecture != "x86_64" ) #and ansible_architecture != "aarch64" ) # "aarch64" for pi. #pi0w is armv6l. i386. amd64? - #Add tools needed by later stages - # Provides "needs-restarting" for ansible's ability to manage rebooting after patching - - name: "RITA Pre: Check for yum-utils before proceeding." - command: rpm -qa | grep yum-utils - check_mode: true - changed_when: false - register: package_check - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' ) - tags: - - packages - - linux - - linuxrpm - - - name: "RITA Pre: Install yum-utils if not found." - package: - name: yum-utils - state: latest - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' ) and '"yum-utils" not in package_check' - tags: - - packages - - linux - - linuxrpm - - # Install aptitude, preferred by ansible for package management on Debian/Ubuntu - - name: "RITA Pre: Install aptitude on debian-based system." 
- apt: - name: aptitude - state: latest - update_cache: true - cache_valid_time: 3600 - when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Zorin OS' ) #While Kali is based on Debian, it does not include the aptitude package. - tags: - - packages - - linux - - linuxdeb - tasks: - # Make sure all rpm packages up to date, add packages - - name: "RITA Pre: Patch and install packages on rpm-based servers." - block: - - name: "RITA Pre: Patch all rpm-based servers." - yum: #We use the "yum" module insteead of dnf to support rpm distros that only have yum - name: "*" - state: latest - skip_broken: yes - update_cache: yes - tags: - - packages - - linux - - linuxrpm - - - name: "RITA Pre: Install rpm packages on rpm-based distributions." - yum: - name: - - nano - - nmap-ncat - - dnf-plugins-core #Provides config-manager binary on Fedora - - wget - - lshw #For user troubleshooting - - net-tools #For user troubleshooting - state: latest - update_cache: true - tags: - - packages - - linux - - linuxrpm - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' ) - - - name: "RITA Pre: Install pip on Centos/Fedora." - yum: - name: - - python3-pip - state: latest - update_cache: true - tags: - - packages - - linux - - linuxrpm - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' ) - # or ansible_distribution == 'OracleLinux' #Note: OracleLinux, and therefore SecurityOnion too, do not include pip3. Disabled. - - - name: "RITA Pre: Patch and install packages on debian-based servers." - block: - - name: "RITA Pre: Patch all debian-based servers." 
- apt: - name: "*" - state: latest - update_cache: yes - cache_valid_time: 3600 - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Pre: Install apt packages on deb-based distributions." - apt: - pkg: - - nano - #Following are to support docker - - apt-transport-https - - ca-certificates - - curl - - python3-pip - - python3-setuptools - - wget - #Following is for user troubleshooting - - net-tools - state: latest - update_cache: true - cache_valid_time: 3600 - tags: - - packages - - linux - - linuxdeb - when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Kali' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Zorin OS' ) - - - name: "RITA Pre: Install packages on Debian and Ubuntu." - apt: - pkg: - - ncat #"ncat" is nmap's netcat on Ubuntu and Debian, listd but not available on Kali - - software-properties-common - - virtualenv - - lshw #listed, but somehow not available on Kali - state: latest - update_cache: true - cache_valid_time: 3600 - tags: - - packages - - linux - - linuxdeb - when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Zorin OS' ) - - - name: "RITA Pre: Install packages on Kali." - apt: - pkg: - - netcat-traditional - - python3-virtualenv - state: latest - update_cache: true - cache_valid_time: 3600 - tags: - - packages - - linux - - linuxdeb - when: ( ansible_distribution == 'Kali' ) - # Add repositories # Note that apt-key is deprecated and that directly downloading the key to trusted.gpg.d WITH A .asc EXTENSION is the correct way now. - - name: "RITA Pre: Add Docker Ubuntu GPG apt key." + - name: "RITA Pre: Download Docker Ubuntu GPG apt key" block: - - name: "RITA Pre: Download Docker Ubuntu GPG apt key with get_url module." 
+ - name: "RITA Pre: Download Docker Ubuntu GPG apt key with get_url module" get_url: url: https://download.docker.com/linux/ubuntu/gpg dest: /etc/apt/trusted.gpg.d/docker-ubuntu.asc mode: "0644" force: true rescue: - - name: "RITA Pre: Download failed with get_url module. Falling back to curl." + - name: "RITA Pre: Download failed with get_url module. Falling back to curl" shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/trusted.gpg.d/docker-ubuntu.asc - when: ( ansible_distribution == 'Ubuntu' ) + when: ansible_distribution == 'Ubuntu' tags: - packages - linux - linuxdeb - - name: "RITA Pre: Add Docker Debian GPG apt key." - block: - - name: "RITA Pre: Download Docker Ubuntu GPG apt key with get_url module." - get_url: - url: https://download.docker.com/linux/debian/gpg - dest: /etc/apt/trusted.gpg.d/docker-debian.asc - mode: "0644" - force: true - rescue: - - name: "RITA Pre: Download failed with get_url module. Falling back to curl." - shell: curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/trusted.gpg.d/docker-debian.asc - when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Kali' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Zorin OS' ) - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Pre: Add Docker Repository to Ubuntu or Debian." + - name: "RITA Pre: Add Docker Repository to Ubuntu" apt_repository: repo: deb https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} stable state: present - when: ( ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' ) - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Pre: Add Docker Repository to Kali." 
- apt_repository: - repo: deb https://download.docker.com/linux/debian bookworm stable - state: present - when: ( ansible_distribution == 'Kali' and ansible_distribution_major_version == '2024' ) - #Debian bookworm appears to be the right one to use according to https://www.kali.org/docs/containers/installing-docker-on-kali/ - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Pre: Add Docker Repository to PopOS." - apt_repository: - repo: deb https://download.docker.com/linux/ubuntu jammy stable - state: present - when: ( ansible_distribution == 'Pop!_OS' and ansible_distribution_major_version == '22' ) - #Ubuntu jammy appears to be the right one to use. - tags: - - packages - - linux - - linuxdeb - - - name: "RITA Pre: Add Docker Repository to Zorin." - apt_repository: - repo: deb https://download.docker.com/linux/ubuntu focal stable - state: present - when: ( ansible_distribution == 'Zorin OS' and ansible_distribution_major_version == '16' ) - #Ubuntu focal appears to be the right one to use. + when: ansible_distribution == 'Ubuntu' tags: - packages - linux - linuxdeb - - name: "RITA Pre: Add Docker repository to Fedora distributions." - yum_repository: - name: docker-ce - description: Docker package repository - gpgkey: https://download.docker.com/linux/fedora/gpg - baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable/ - state: present - enabled: true - when: ( ansible_distribution == 'Fedora' ) # and ansible_distribution_major_version == '40' ) - tags: - - packages - - linux - - linuxrpm - - - name: "RITA Pre: Add Docker Repository to AlmaLinux/Centos/OracleLinux/Rocky distributions." 
- #shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + - name: "RITA Pre: Add Docker Repository to Centos and Rocky distributions" #Alma is included for legacy support yum_repository: name: docker-ce description: Docker package repository @@ -295,13 +75,13 @@ baseurl: https://download.docker.com/linux/centos/$releasever/$basearch/stable/ state: present enabled: true - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'Rocky' ) + when: ansible_distribution == 'CentOS' or ansible_distribution == 'Rocky' or ansible_distribution == 'AlmaLinux' tags: - packages - linux - linuxrpm - - name: "RITA Pre: Add Docker Repository to RHEL distribution." + - name: "RITA Pre: Add Docker Repository to RHEL distribution" yum_repository: name: docker-ce description: Docker package repository @@ -309,16 +89,16 @@ baseurl: https://download.docker.com/linux/rhel/$releasever/$basearch/stable/ state: present enabled: true - when: ( ansible_distribution == 'RedHat' ) + when: ansible_distribution == 'RedHat' tags: - packages - linux - linuxrpm - #Install docker - - name: "RITA Pre: Install docker on debian-based distributions." + # Install docker + - name: "RITA Pre: Install docker on Ubuntu" block: - - name: "RITA Pre: Uninstall unofficial docker packages on debian-based distributions." + - name: "RITA Pre: Uninstall unofficial docker packages on Ubuntu" apt: name: - docker-client @@ -341,7 +121,7 @@ - linux - linuxdeb - - name: "RITA Pre: Install docker-ce on debian-based distributions." + - name: "RITA Pre: Install docker-ce on Ubuntu" apt: name: - docker-ce @@ -356,20 +136,20 @@ - linux - linuxdeb - - name: "RITA Pre: Install docker modules for Python on deb-based distributions." 
+ - name: "RITA Pre: Install docker modules for Python on Ubuntu" apt: name: - python3-docker - - python3-requests #We'll have to see if debian/ubuntu can work with the stock (2.28.1 in debian 12.05 / 2.31.0 in ubuntu 24.04) + - python3-requests tags: - docker - linux - linuxdeb - when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Kali' or ansible_distribution == 'Pop!_OS' or ansible_distribution == 'Ubuntu' or ansible_distribution == 'Zorin OS' ) + when: ansible_distribution == 'Ubuntu' - - name: "RITA Pre: Install docker on rpm-based distributions." + - name: "RITA Pre: Install docker on rpm-based distributions" block: - - name: "RITA Pre: Uninstall unofficial docker packages on rpm-based distributions." + - name: "RITA Pre: Uninstall unofficial docker packages on rpm-based distributions" yum: name: - docker-client @@ -394,7 +174,7 @@ - linux - linuxrpm - - name: "RITA Pre: Install docker-ce on rpm-based distributions." + - name: "RITA Pre: Install docker-ce on rpm-based distributions" yum: name: - docker-ce @@ -408,31 +188,20 @@ - docker - linux - linuxrpm - when: ( ansible_distribution == 'AlmaLinux' or ansible_distribution == 'CentOS' or ansible_distribution == 'Fedora' or ansible_distribution == 'OracleLinux' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' ) + when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat' or ansible_distribution == 'Rocky' or ansible_distribution == 'AlmaLinux' - - name: "RITA Pre: Start and enable docker in systemd." + - name: "RITA Pre: Start and enable docker in systemd" systemd: name: docker state: started enabled: yes - when: ( ansible_distribution != 'OracleLinux' ) tags: - docker - linux - linuxdeb - linuxrpm - #It appears the "docker modules for python on rpm-based linux" is needed to use the ansible "systemd" module, so we can't use that module on OracleLinux... - - #...so we fall back on starting and enabling it on OracleLinux by hand. 
- - name: "RITA Pre: Start and enable docker in systemd on OracleLinux." - shell: systemctl enable docker.service ; systemctl start docker.service - when: ( ansible_distribution == 'OracleLinux' ) - tags: - - docker - - linux - - linuxrpm - - name: "RITA Pre: Transfer docker-compose script to target system for backwards compatibility." + - name: "RITA Pre: Transfer docker-compose script to target system for backwards compatibility" copy: src: docker-compose dest: /usr/local/bin/docker-compose diff --git a/installer/install_scripts/install_rita.sh b/installer/install_scripts/install_rita.sh index 89ed4f3..c927852 100755 --- a/installer/install_scripts/install_rita.sh +++ b/installer/install_scripts/install_rita.sh @@ -1,67 +1,81 @@ -#!/bin/bash +#!/usr/bin/env bash +set -euo pipefail + +# RITA Install Script +# This script installs Ansible and uses it to install RITA and Zeek on a target system. RITA_VERSION="REPLACE_ME" _INSTALL_ZEEK=true -set -e +# Function `show_help` displays usage information +show_help() { + echo "Usage: $0 [--disable-zeek] " >&2 + echo "Example: $0 127.0.0.1" >&2 + exit 1 +} -if [ "z$1" = "z--disable-zeek" ]; then - _INSTALL_ZEEK=false - shift +# No arguments provided +if [[ $# -eq 0 ]]; then + show_help fi -if [ -n "$1" ]; then - install_target="$1" - shift -else - echo "Please add the name of the system on which you want rita installed as a command line option. If you want to install rita on this computer, use 127.0.0.1 ." 
>&2 - echo "The final command will look like:" >&2 - echo "$0 the_computer_name_or_ip_on_which_to_install_rita" >&2 - exit 1 + +# Parse optional flag +if [[ "${1:-}" = "--disable-zeek" ]]; then + _INSTALL_ZEEK=false + shift +fi + +# Hostname/IP must now be present +if [[ $# -eq 0 ]]; then + show_help fi -if [ "z$1" = "z--disable-zeek" ]; then - _INSTALL_ZEEK=false - shift + +install_target="$1" +shift + +# If someone puts --disable-zeek after the host, still support it: +if [[ "${1:-}" = "--disable-zeek" ]]; then + _INSTALL_ZEEK=false + shift fi -# change working directory to directory of this script +# Change working directory to directory of this script pushd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" > /dev/null +# Load helper functions source ./scripts/helper.sh - +# Install ansible ./scripts/ansible-installer.sh -# prepend ~/.local/bin to path if not present -[[ ":$PATH:" != *":$HOME/.local/bin:"* ]] && PATH="$HOME/.local/bin:${PATH}" - - -status "Installing rita via ansible on $install_target" #================ -if [ "$install_target" = "localhost" -o "$install_target" = "127.0.0.1" -o "$install_target" = "::1" ]; then - if [ "$(uname)" = "Darwin" ]; then +# Install rita +status "Installing rita via ansible on $install_target" +if [[ "$install_target" = "localhost" || "$install_target" = "127.0.0.1" || "$install_target" = "::1" ]]; then + if [[ "$(uname)" = "Darwin" ]]; then # TODO support macOS install target echo "${YELLOW}Installing RITA via Ansible on the local system is not yet supported on MacOS.${NORMAL}" exit 1 fi - status "If asked for a 'BECOME password', that is your non-root sudo password on this machine ." - if [ "$_INSTALL_ZEEK" = 'true' ]; then - ansible-playbook --connection=local -K -i "127.0.0.1," -e "install_hosts=127.0.0.1," install_pre.yml install_rita.yml install_zeek.yml install_post.yml + status "When prompted for a BECOME password, enter your sudo password. If your user does not need one for sudo, just press Enter." 
+ if [[ "$_INSTALL_ZEEK" = 'true' ]]; then + ansible-playbook --connection=local -K -i "127.0.0.1," -e "install_hosts=127.0.0.1," install_pre.yml install_rita.yml install_zeek.yml else - ansible-playbook --connection=local -K -i "127.0.0.1," -e "install_hosts=127.0.0.1," install_pre.yml install_rita.yml install_post.yml + ansible-playbook --connection=local -K -i "127.0.0.1," -e "install_hosts=127.0.0.1," install_pre.yml install_rita.yml fi else - status "Setting up future ssh connections to $install_target . You may be asked to provide your ssh password to $install_target ." #================ - ./scripts/sshprep "$install_target" - status "If asked for a 'BECOME password', that is your non-root sudo password on $install_target ." - if [ "$_INSTALL_ZEEK" = 'true' ]; then + status "Setting up future ssh connections to $install_target . You may be asked to provide your ssh password to $install_target ." + ./scripts/sshprep.sh "$install_target" + status "When prompted for a BECOME password, enter your sudo password for $install_target. If your user does not need one for sudo, just press Enter." 
+ if [[ "$_INSTALL_ZEEK" = 'true' ]]; then # TODO: fix and re-implement cron setup after RITA#65 is resolved # status "Creating Zeek log transport Cron file" # rm -f zeek_log_transport.cron ; touch zeek_log_transport.cron # #NON_ROOT_ACCOUNT_NAME will be replaced after being placed on the target system (by an ansible recipe in install_zeek.yml # echo "5 * * * * NON_ROOT_ACCOUNT_NAME /usr/local/bin/zeek_log_transport.sh --dest $install_target" >>zeek_log_transport.cron - ansible-playbook -K -i "${install_target}," -e "install_hosts=${install_target}," install_pre.yml install_rita.yml install_zeek.yml install_post.yml + ansible-playbook -K -i "${install_target}," -e "install_hosts=${install_target}," install_pre.yml install_rita.yml install_zeek.yml else - ansible-playbook -K -i "${install_target}," -e "install_hosts=${install_target}," install_pre.yml install_rita.yml install_post.yml + ansible-playbook -K -i "${install_target}," -e "install_hosts=${install_target}," install_pre.yml install_rita.yml fi fi @@ -74,13 +88,14 @@ echo \ Brought to you by Active CounterMeasures© " -echo "RITA was successfully installed!" +echo "Installation complete!" +echo "" -if [ "$_INSTALL_ZEEK" = 'true' ]; then - echo "Please run the following commands on any new zeek sensors" - echo " zeek start ; zeek enable" - echo "" +if [[ "$_INSTALL_ZEEK" = 'true' ]]; then + echo "Please run the following commands on any new zeek sensors" >&2 + echo " zeek start ; zeek enable" >&2 + echo "" >&2 fi # switch back to original working directory -popd > /dev/null +popd > /dev/null \ No newline at end of file diff --git a/installer/install_scripts/install_rita.yml b/installer/install_scripts/install_rita.yml index 7bd694c..873c1d7 100644 --- a/installer/install_scripts/install_rita.yml +++ b/installer/install_scripts/install_rita.yml @@ -1,20 +1,4 @@ ---- -# ansible install playbook for rita V2. 
-# Version: 202408061413 -# sample runs: -# Optional: Add the following block, without #'s to /etc/ansible/hosts (or /opt/local/etc/ansible/hosts if using ansible on mac with mac ports). -# The hosts must each be on their own line. These can be full or short hostnames or a name following "Host" in ~/.ssh/config. -# -# [allritas] -# ro810 -# ub2404 -# -# Then run this, with a comma separated list of hostnames from the above file with a comma at the end of the list: -# -# ansible-playbook -C -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' #-C (no changes) means do a dry run -# ansible-playbook -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' - -# Many thanks to but-i-am-dominator for his help with this playbook. +# ansible install playbook for rita - name: "RITA Install: RITA installer." hosts: "{{ install_hosts }}" @@ -24,13 +8,13 @@ rita_version: "REPLACE_ME" rita_container_image: "ghcr.io/activecm/rita:{{ rita_version }}" clickhouse_container_image: clickhouse/clickhouse-server:latest - ansible_python_interpreter: /bin/python3 # Centos 7 defaults to using python2, so we force python 3. This change does not break any other distros + ansible_python_interpreter: /bin/python3 - #The install_pre.yml script should already have been run by this point + # the install_pre.yml script should already have been run by this point tasks: - #Make directories - - name: "RITA Install: Create configuration directories." + # make directories + - name: "RITA Install: Create configuration directories" ansible.builtin.file: path: "{{ item }}" state: directory @@ -47,9 +31,9 @@ - linuxdeb - linuxrpm - #Install RITA - #Following pulls right from dockerhub. We may not be able to do this if the system is airgapped - - name: "RITA Install: Install {{ rita_container_image }} docker image." + # install RITA + # the following pulls right from dockerhub. 
We may not be able to do this if the system is airgapped + - name: "RITA Install: Install {{ rita_container_image }} docker image" block: - name: "Pull from Github.io Container repo" community.docker.docker_image: @@ -68,8 +52,8 @@ community.docker.docker_image_load: path: "/opt/rita/rita-{{ rita_version }}-image.tar" register: load_result - #This final one prints a list of the loaded images if we use the above 2 stanzas to load from a file. - - name: "RITA Install: Print loaded image names." + # this final one prints a list of the loaded images if we use the above 2 stanzas to load from a file + - name: "RITA Install: Print loaded image names" ansible.builtin.debug: msg: "Loaded the following images: {{ load_result.image_names | join(', ') }}" tags: @@ -79,7 +63,7 @@ - linuxdeb - linuxrpm - - name: "RITA Install: Transfer rita shell script to target system." + - name: "RITA Install: Transfer rita shell script to target system" copy: src: ./opt/rita.sh dest: /usr/local/bin/rita @@ -93,7 +77,7 @@ - linuxdeb - linuxrpm - - name: "RITA Install: Transfer rita install files to /opt/rita." + - name: "RITA Install: Transfer rita install files to /opt/rita" copy: src: ./opt/ dest: /opt/rita @@ -107,7 +91,7 @@ - linuxdeb - linuxrpm - - name: "RITA Install: Transfer rita user files to /etc/rita." + - name: "RITA Install: Transfer rita user files to /etc/rita" copy: src: ./etc/ dest: /etc/rita @@ -120,4 +104,3 @@ - linux - linuxdeb - linuxrpm -#The install_post.yml script should be run next diff --git a/installer/install_scripts/install_zeek.yml b/installer/install_scripts/install_zeek.yml index 90e2b09..d10f8ff 100644 --- a/installer/install_scripts/install_zeek.yml +++ b/installer/install_scripts/install_zeek.yml @@ -1,15 +1,6 @@ ---- -# ansible install playbook for docker-zeek. 
-# Version: 202408061413 -# sample runs: -# Run this, with a comma separated list of hostnames from the above file with a comma at the end of the list: -# -# ansible-playbook -C -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/install_zeek.yml | grep -v '^skipping: ' #-C (no changes) means do a dry run -# ansible-playbook -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/install_zeek.yml | grep -v '^skipping: ' - -# Many thanks to but-i-am-dominator for his help with this playbook. - -- name: "Zeek Install: Zeek installer." +# ansible install playbook for docker-zeek + +- name: "Zeek Install: Zeek installer" hosts: "{{ install_hosts }}" become: true @@ -17,14 +8,14 @@ zeek_version: "REPLACE_ME" zeek_container_image: "activecm/zeek:{{ zeek_version }}" clickhouse_container_image: clickhouse/clickhouse-server:latest - ansible_python_interpreter: /bin/python3 # Centos 7 defaults to using python2, so we force python 3. This change does not break any other distros + ansible_python_interpreter: /bin/python3 local_known_hosts: "{{ lookup('env', 'HOME') }}/.ssh/known_hosts" - #The install_pre.yml script should already have been run by this point + # the install_pre.yml script should already have been run by this point tasks: - #Make directories - - name: "Zeek Install: Create zeek directories." + # make directories + - name: "Zeek Install: Create zeek directories" ansible.builtin.file: path: "{{ item }}" state: directory @@ -53,8 +44,8 @@ - linuxdeb - linuxrpm - #Install Zeek - #Following pulls right from dockerhub. We may not be able to do this if the system is airgapped + # install Zeek + # the following pulls right from dockerhub. We may not be able to do this if the system is airgapped - name: "Pull from dockerhub container repo" community.docker.docker_image: name: "{{ zeek_container_image }}" @@ -67,7 +58,7 @@ - linuxdeb - linuxrpm - - name: "Zeek Install: Transfer zeek shell script to target system." 
+ - name: "Zeek Install: Transfer zeek shell script to target system" copy: src: ./opt/zeek dest: /usr/local/bin/zeek @@ -130,13 +121,13 @@ # - linuxdeb # - linuxrpm - - name: "Zeek Install: check whether node.cfg exists to decide if this is the first time running zeek." + - name: "Zeek Install: check whether node.cfg exists to decide if this is the first time running zeek" stat: path: /opt/zeek/etc/node.cfg register: node_stat - # We can't do this the first time. zeek start runs zeekcfg on the first pass, which is an - # interactive CLI tool to pick interface(s). The user has to run this by hand. + # this cannot be run on the first install, as zeekcfg needs to be run first to let the user select their interface(s). + # the user will need to run this by hand after the install completes. - name: "Zeek Install: Start Zeek" shell: "/usr/local/bin/zeek start" when: (node_stat.stat.exists == true) @@ -147,7 +138,8 @@ - linuxdeb - linuxrpm - # We also can't run this the first time, as it requires a running zeek. + # this cannot be run on the first install, as zeekcfg needs to be run first to let the user select their interface(s). + # the user will need to run this by hand after the install completes. 
- name: "Zeek Install: Start Zeek on future reboots" shell: "/usr/local/bin/zeek enable" when: (node_stat.stat.exists == true) @@ -222,4 +214,3 @@ # - linux # - linuxdeb # - linuxrpm -#The install_post.yml script should be run next diff --git a/installer/install_scripts/sshprep b/installer/install_scripts/sshprep.sh similarity index 98% rename from installer/install_scripts/sshprep rename to installer/install_scripts/sshprep.sh index 39e9130..f046e22 100755 --- a/installer/install_scripts/sshprep +++ b/installer/install_scripts/sshprep.sh @@ -1,10 +1,5 @@ -#!/bin/bash -#Performs all the setup steps needed to connect to one or more hosts listed on the command line -#Copyright 2022 William Stearns -#Released under the GPL 3.0 -#Version 0.1.8 - - +#!/usr/bin/env bash +set -euo pipefail askYN() { # Prints a question mark, reads repeatedly until the user diff --git a/installer/rita-install.md b/installer/rita-install.md deleted file mode 100644 index 9a051dd..0000000 --- a/installer/rita-install.md +++ /dev/null @@ -1,66 +0,0 @@ - -#ansible install playbook for rita V2. - -#sample runs: -# Optional: Add the following block, without #'s to /etc/ansible/hosts (or /opt/local/etc/ansible/hosts if using ansible on mac with mac ports). -#The hosts must each be on their own line. These can be full or short hostnames or a name following "Host" in ~/.ssh/config . -# -#[allritas] -#ro810 -#ub2404 -# -# Then run this, with a comma separated list of hostnames from the above file with a comma at the end of the list: -# -# ansible-playbook -C -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' #-C (no changes) means do a dry run -# ansible-playbook -K -i "ro810,ub2404," -e "install_hosts=ro810,ub2404," ~/.ansible/playbooks/rita-install.yml | grep -v '^skipping: ' - - - -#Many thanks to but-i-am-dominator for his help with this playbook. - -#Intended supported distributions. 
These have had testing done on at least one version. -#ADHD: 4 (based on ubuntu 20, works) -#AlmaLinux: 8, 9 (tested: 9.4, works) -#CentOS: stream 9 (tested: stream 9, works) -#Debian: 11, 12 (tested: debian 12, works) -#Fedora: 39, 40 (tested: fedora 40, works) -#Kali: 2024.2 (tested: 2024.2, works) -#OracleLinux: 9 (tested: 9.4, works. NOTE: this was done on Security Onion 2.4.70 which is _based_ on Oracle Linux 9.4) -#Rocky: 8, 9 (tested: rocky 8, works) -#Security Onion: 2.4.70 (based on oracle linux 9, works) -#Ubuntu 20.04, 22.04, 24.04 (tested: ubuntu 24.04, works) - -#We hope to support these in the future, but they are not supported at the moment. -#MacOS: Sonoma -#RHEL: 8, 9 (as of 20240618 there's a known conflict between rhel 9 and docker-ce: -#Note: RHEL 9 is currently (20240618) broken with docker-ce (and docker knows this -#and puts up a warning for this distro. Current error from trying to install on rhel 9: -# -#fatal: [rhel9-aws]: FAILED! => {"changed": false, "failures": [], -#"msg": "Depsolve Error occurred: \n Problem 1: cannot install the -#best candidate for the job\n - nothing provides container-selinux -#>= 2:2.74 needed by docker-ce-3:26.1.4-1.el9.x86_64 from -#docker-ce\n - nothing provides iptables needed by -#docker-ce-3:26.1.4-1.el9.x86_64 from docker-ce\n Problem 2: cannot -#install the best candidate for the job\n - nothing provides -#container-selinux >= 2:2.74 needed by -#containerd.io-1.6.33-3.1.el9.x86_64 from docker-ce", "rc": 1, -#"results": []} - -#Intended supported CPU architectures - not all have been tested yet. For any CPU architectures we hope to support, we need -#to build rita for that architecture. To confirm whether your CPU is 32 bit vs 64 bit, run -#lshw | head | grep -i width -# width: 64 bits -# -#x86_64 #All testing so far has been on x86_64 -#Possible future supported architectures -#aarch64 #Pi4 and Pi5, but note this requires a 64 bit OS like Ubuntu or recent RaspiOS64 for pi. Appears to be equal to arm64. 
-#armhf #32 bit arm, likely includes pi3 and below (or pi4 and pi5 when running a 32 bit OS) -# #For reference, pi zero and pi1 are 32 bit/arm6hf, pi2 is 32 bit/armhf, and (64 bit) pi zero 2, pi3, and pi4 are arm64=aarch64 (though these may not have a 64 bit os to run on them.) - - - - - - - diff --git a/installer/run_dev.sh b/installer/run_dev.sh index 209d06a..59b56c7 100755 --- a/installer/run_dev.sh +++ b/installer/run_dev.sh @@ -1,29 +1,45 @@ -set -e +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +RITA_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" + droplet_ip="$1" -if [ -z "$droplet_ip" ]; then +if [[ -z "$droplet_ip" ]]; then echo "droplet ip was not provided" exit 1 fi -VERSION=$(git describe --always --abbrev=0 --tags) - - -./generate_installer.sh -tar -xf rita-${VERSION}.tar.gz -./rita-${VERSION}-installer/install_rita.sh "root@$droplet_ip" +# get RITA version from git +if VERSION="$(git -C "$RITA_DIR" describe --tags --exact-match 2>/dev/null)"; then + : # release / ci +elif VERSION="$(git -C "$RITA_DIR" describe --tags --dirty --always 2>/dev/null)"; then + : # dev +else + fail "Unable to determine RITA_VERSION." +fi +[[ -n "$VERSION" ]] || { echo "Unable to determine RITA_VERSION." >&2; exit 1; } -# # # # ansible-playbook -i digitalocean_inventory.py -e "install_hosts=${droplet_ip}" "./rita-${VERSION}-installer/install_rita.yml" +# generate installer +"${SCRIPT_DIR}/generate_installer.sh" +INSTALLER_DIR="${SCRIPT_DIR}/rita-${VERSION}-installer" +# verify tar ball exists +[[ -f "${INSTALLER_DIR}.tar.gz" ]] || { echo "RITA installer tarball not found." >&2; exit 1; } +tar -xf "${INSTALLER_DIR}.tar.gz" +[[ -f "${INSTALLER_DIR}/install_rita.sh" ]] || { echo "RITA installer script not found." 
>&2; exit 1; } +"${INSTALLER_DIR}/install_rita.sh" "root@$droplet_ip" +# # # # ansible-playbook -i digitalocean_inventory.py -e "install_hosts=${droplet_ip}" "./rita-${VERSION}-installer/install_rita.yml" # copy over test data -scp -r ../test_data/open_sni "root@$droplet_ip":/root/sample_logs +scp -r "${RITA_DIR}/test_data/open_sni" "root@$droplet_ip":/root/sample_logs # # copy over test script -scp ./test_installed.sh "root@$droplet_ip":/root/test_installed.sh +scp "${SCRIPT_DIR}/test_installed.sh" "root@$droplet_ip":/root/test_installed.sh # run test script ssh -t "root@$droplet_ip" /root/test_installed.sh "$VERSION" -# \ No newline at end of file +# \ No newline at end of file diff --git a/installer/test_installed.sh b/installer/test_installed.sh index 52a04e7..f74ee50 100755 --- a/installer/test_installed.sh +++ b/installer/test_installed.sh @@ -1,28 +1,27 @@ #!/usr/bin/env bash -set -e +set -euo pipefail VERSION="$1" -if [ -z "$VERSION" ]; then +if [[ -z "$VERSION" ]]; then echo "VERSION was not set" exit 1 fi # check that all files exist in expected locations -[ -f /usr/local/bin/rita ] || { echo >&2 "rita should be in /usr/local/bin"; exit 1; } +[[ -f /usr/local/bin/rita ]] || { echo >&2 "rita should be in /usr/local/bin"; exit 1; } # opt files -[ -f /opt/rita/rita.sh ] || { echo >&2 "rita.sh should be in /opt/rita"; exit 1; } -[ -f /opt/rita/docker-compose.yml ] || { echo >&2 "docker-compose.yml should be in /opt/rita"; exit 1; } -[ -f /opt/rita/.env ] || { echo >&2 ".env should be in /opt/rita"; exit 1; } - +[[ -f /opt/rita/rita.sh ]] || { echo >&2 "rita.sh should be in /opt/rita"; exit 1; } +[[ -f /opt/rita/docker-compose.yml ]] || { echo >&2 "docker-compose.yml should be in /opt/rita"; exit 1; } +[[ -f /opt/rita/.env ]] || { echo >&2 ".env should be in /opt/rita"; exit 1; } # etc files -[ -f /etc/rita/config.hjson ] || { echo >&2 "config.hjson should be in /etc/rita"; exit 1; } -[ -f /etc/rita/config.xml ] || { echo >&2 "config.xml should be in 
/etc/rita"; exit 1; } -[ -f /etc/rita/http_extensions_list.csv ] || { echo >&2 "http_extensions_list.csv should be in /etc/rita"; exit 1; } -[ -f /etc/rita/logger-cron ] || { echo >&2 "logger-cron should be in /etc/rita"; exit 1; } -[ -f /etc/rita/syslog-ng.conf ] || { echo >&2 "syslog-ng.conf should be in /etc/rita"; exit 1; } -[ -f /etc/rita/timezone.xml ] || { echo >&2 "timezone.xml should be in /etc/rita"; exit 1; } -[ -d /etc/rita/threat_intel_feeds ] || { echo >&2 "/threat_intel_feeds should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/config.hjson ]] || { echo >&2 "config.hjson should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/config.xml ]] || { echo >&2 "config.xml should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/http_extensions_list.csv ]] || { echo >&2 "http_extensions_list.csv should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/logger-cron ]] || { echo >&2 "logger-cron should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/syslog-ng.conf ]] || { echo >&2 "syslog-ng.conf should be in /etc/rita"; exit 1; } +[[ -f /etc/rita/timezone.xml ]] || { echo >&2 "timezone.xml should be in /etc/rita"; exit 1; } +[[ -d /etc/rita/threat_intel_feeds ]] || { echo >&2 "/threat_intel_feeds should be in /etc/rita"; exit 1; } # verify that sed worked during installer generation if [ "$(grep -c "image: ghcr.io/activecm/rita:${VERSION}" /opt/rita/docker-compose.yml)" -ne 1 ]; then @@ -32,24 +31,24 @@ fi # verify .env has production looking values -if [ "$(grep -c "^CONFIG_DIR=/etc/rita" /opt/rita/.env)" -ne 1 ]; then +if [[ "$(grep -c "^CONFIG_DIR=/etc/rita" /opt/rita/.env)" -ne 1 ]]; then echo "/opt/rita/.env should have CONFIG_DIR=/etc/rita set" exit 1 fi -if [ "$(grep -c "^CONFIG_FILE=/etc/rita/config.hjson" /opt/rita/.env)" -ne 1 ]; then +if [[ "$(grep -c "^CONFIG_FILE=/etc/rita/config.hjson" /opt/rita/.env)" -ne 1 ]]; then echo "/opt/rita/.env should have CONFIG_FILE=/etc/rita/config.hjson set" exit 1 fi -if [ "$(grep -c "^DB_ADDRESS=db:9000" /opt/rita/.env)" -ne 1 ]; 
then +if [[ "$(grep -c "^DB_ADDRESS=db:9000" /opt/rita/.env)" -ne 1 ]]; then echo "/opt/rita/.env should have DB_ADDRESS=db:9000 set" exit 1 fi # verify rita version -if [ "$(rita --version | grep -c "$VERSION")" -ne 1 ]; then +if [[ "$(rita --version | grep -c "$VERSION")" -ne 1 ]]; then echo "rita version command did not work correctly" exit 1 fi diff --git a/integration_rolling/init_ch.sh b/integration_rolling/init_ch.sh index 8c034be..5f70c82 100755 --- a/integration_rolling/init_ch.sh +++ b/integration_rolling/init_ch.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash apt-get update apt-get install -y libfaketime diff --git a/util/util.go b/util/util.go index e20e50b..a3e1e7f 100644 --- a/util/util.go +++ b/util/util.go @@ -58,6 +58,11 @@ type FixedString struct { Data [16]byte } +type WalkError struct { + Path string + Error error +} + func init() { // parse private IPs privateIPs, _ := NewSubnetList(