diff --git a/.github/workflows/api-build-and-push-ghcr.yml b/.github/workflows/api-build-and-push-ghcr.yml index ae0aa9276..c3afef062 100644 --- a/.github/workflows/api-build-and-push-ghcr.yml +++ b/.github/workflows/api-build-and-push-ghcr.yml @@ -13,6 +13,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/arkime-build-and-push-ghcr.yml b/.github/workflows/arkime-build-and-push-ghcr.yml index 7d05235db..6e29fe344 100644 --- a/.github/workflows/arkime-build-and-push-ghcr.yml +++ b/.github/workflows/arkime-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/dashboards-build-and-push-ghcr.yml b/.github/workflows/dashboards-build-and-push-ghcr.yml index c02f705cd..89a231969 100644 --- a/.github/workflows/dashboards-build-and-push-ghcr.yml +++ b/.github/workflows/dashboards-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/dashboards-helper-build-and-push-ghcr.yml b/.github/workflows/dashboards-helper-build-and-push-ghcr.yml index 1a4654e1b..3b37f288b 100644 --- a/.github/workflows/dashboards-helper-build-and-push-ghcr.yml +++ b/.github/workflows/dashboards-helper-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/file-upload-build-and-push-ghcr.yml b/.github/workflows/file-upload-build-and-push-ghcr.yml index ae305baf4..d49a4c764 100644 --- a/.github/workflows/file-upload-build-and-push-ghcr.yml +++ b/.github/workflows/file-upload-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/filebeat-build-and-push-ghcr.yml b/.github/workflows/filebeat-build-and-push-ghcr.yml index f38fce003..22eda2753 100644 --- a/.github/workflows/filebeat-build-and-push-ghcr.yml +++ b/.github/workflows/filebeat-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - 
'!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/freq-build-and-push-ghcr.yml b/.github/workflows/freq-build-and-push-ghcr.yml index 2b4981465..460306157 100644 --- a/.github/workflows/freq-build-and-push-ghcr.yml +++ b/.github/workflows/freq-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/hedgehog-iso-build-docker-wrap-push-ghcr.yml b/.github/workflows/hedgehog-iso-build-docker-wrap-push-ghcr.yml index 76361ae13..bf818790a 100644 --- a/.github/workflows/hedgehog-iso-build-docker-wrap-push-ghcr.yml +++ b/.github/workflows/hedgehog-iso-build-docker-wrap-push-ghcr.yml @@ -99,6 +99,9 @@ jobs: cp -r ./arkime/patch ./hedgehog-iso/shared/arkime_patch mkdir -p ./hedgehog-iso/suricata cp -r ./suricata/rules-default ./hedgehog-iso/suricata/ + mkdir -p ./hedgehog-iso/nginx + cp -r ./nginx/landingpage/css ./hedgehog-iso/nginx/ + cp -r ./nginx/landingpage/js ./hedgehog-iso/nginx/ pushd ./hedgehog-iso echo "${{ steps.extract_malcolm_version.outputs.mversion }}" > ./shared/version.txt echo "${{ secrets.MAXMIND_GEOIP_DB_LICENSE_KEY }}" > ./shared/maxmind_license.txt diff --git a/.github/workflows/htadmin-build-and-push-ghcr.yml b/.github/workflows/htadmin-build-and-push-ghcr.yml index 797e867d5..b42016a6c 100644 --- a/.github/workflows/htadmin-build-and-push-ghcr.yml +++ b/.github/workflows/htadmin-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/logstash-build-and-push-ghcr.yml b/.github/workflows/logstash-build-and-push-ghcr.yml index 40416b40b..ab3245d02 100644 --- a/.github/workflows/logstash-build-and-push-ghcr.yml +++ b/.github/workflows/logstash-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/malcolm-iso-build-docker-wrap-push-ghcr.yml b/.github/workflows/malcolm-iso-build-docker-wrap-push-ghcr.yml index b648dd8a5..11dc59474 100644 --- a/.github/workflows/malcolm-iso-build-docker-wrap-push-ghcr.yml +++ b/.github/workflows/malcolm-iso-build-docker-wrap-push-ghcr.yml @@ -9,6 +9,8 @@ on: - 'malcolm-iso/**' - 'shared/bin/*' - '!shared/bin/configure-capture.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/zeek*' - '!shared/bin/suricata*' - '.trigger_iso_workflow_build' diff --git a/.github/workflows/netbox-build-and-push-ghcr.yml b/.github/workflows/netbox-build-and-push-ghcr.yml index 05e927320..133f82735 100644 --- a/.github/workflows/netbox-build-and-push-ghcr.yml +++ b/.github/workflows/netbox-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - 
'!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/nginx-build-and-push-ghcr.yml b/.github/workflows/nginx-build-and-push-ghcr.yml index 5eb534666..654e6fdf6 100644 --- a/.github/workflows/nginx-build-and-push-ghcr.yml +++ b/.github/workflows/nginx-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/agg-init.sh' - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/os-disk-config.py' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' diff --git a/.github/workflows/opensearch-build-and-push-ghcr.yml b/.github/workflows/opensearch-build-and-push-ghcr.yml index 290329cb6..acfb82187 100644 --- a/.github/workflows/opensearch-build-and-push-ghcr.yml +++ b/.github/workflows/opensearch-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/pcap-capture-build-and-push-ghcr.yml b/.github/workflows/pcap-capture-build-and-push-ghcr.yml index b79262978..190f89752 100644 --- a/.github/workflows/pcap-capture-build-and-push-ghcr.yml +++ b/.github/workflows/pcap-capture-build-and-push-ghcr.yml @@ -13,6 +13,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/pcap-monitor-build-and-push-ghcr.yml b/.github/workflows/pcap-monitor-build-and-push-ghcr.yml index 0384acfcb..62fdcce1f 100644 --- a/.github/workflows/pcap-monitor-build-and-push-ghcr.yml +++ b/.github/workflows/pcap-monitor-build-and-push-ghcr.yml @@ -13,6 +13,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/postgresql-build-and-push-ghcr.yml b/.github/workflows/postgresql-build-and-push-ghcr.yml index 1b190750d..467a3396d 100644 --- a/.github/workflows/postgresql-build-and-push-ghcr.yml +++ b/.github/workflows/postgresql-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/redis-build-and-push-ghcr.yml b/.github/workflows/redis-build-and-push-ghcr.yml index 0cbb9d8f7..49373adac 100644 --- a/.github/workflows/redis-build-and-push-ghcr.yml +++ b/.github/workflows/redis-build-and-push-ghcr.yml @@ -12,6 +12,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - 
'!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/suricata-build-and-push-ghcr.yml b/.github/workflows/suricata-build-and-push-ghcr.yml index 8cbfa7a39..eae9b387e 100644 --- a/.github/workflows/suricata-build-and-push-ghcr.yml +++ b/.github/workflows/suricata-build-and-push-ghcr.yml @@ -13,6 +13,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/.github/workflows/zeek-build-and-push-ghcr.yml b/.github/workflows/zeek-build-and-push-ghcr.yml index 7969ec3d0..1a64b28ab 100644 --- a/.github/workflows/zeek-build-and-push-ghcr.yml +++ b/.github/workflows/zeek-build-and-push-ghcr.yml @@ -13,6 +13,8 @@ on: - '!shared/bin/common-init.sh' - '!shared/bin/sensor-init.sh' - '!shared/bin/os-disk-config.py' + - '!shared/bin/extracted_files_http_server.py' + - '!shared/bin/web-ui-asset-download.sh' - '!shared/bin/preseed_late_user_config.sh' - '!shared/bin/configure-interfaces.py' - '!shared/bin/configure-capture.py' diff --git a/Dockerfiles/arkime.Dockerfile b/Dockerfiles/arkime.Dockerfile index 6a5fc3877..39bb08b84 100644 --- a/Dockerfiles/arkime.Dockerfile +++ b/Dockerfiles/arkime.Dockerfile @@ -7,7 +7,7 @@ ENV TERM xterm ENV PYTHONDONTWRITEBYTECODE 1 ENV PYTHONUNBUFFERED 1 -ENV ARKIME_VERSION "v5.0.1" +ENV ARKIME_VERSION "v5.1.2" ENV ARKIME_DIR "/opt/arkime" ENV ARKIME_URL "https://github.com/arkime/arkime.git" ENV ARKIME_LOCALELASTICSEARCH no @@ -16,7 +16,8 @@ ENV ARKIME_INET yes ADD arkime/scripts/bs4_remove_div.py /opt/ ADD arkime/patch/* /opt/patches/ -RUN apt-get -q update && \ +RUN export DEBARCH=$(dpkg --print-architecture) && \ + apt-get -q update && \ apt-get -y -q --no-install-recommends upgrade && \ apt-get install -q -y --no-install-recommends \ binutils \ @@ -73,7 +74,10 @@ RUN apt-get -q update && \ make install && \ npm cache clean --force && \ rm -f ${ARKIME_DIR}/wiseService/source.* ${ARKIME_DIR}/etc/*.systemd.service && \ - bash -c "file ${ARKIME_DIR}/bin/* ${ARKIME_DIR}/node-v*/bin/* | grep 'ELF 64-bit' | sed 's/:.*//' | xargs -l -r strip -v --strip-unneeded" + bash -c "file ${ARKIME_DIR}/bin/* ${ARKIME_DIR}/node-v*/bin/* | grep 'ELF 64-bit' | sed 's/:.*//' | xargs -l -r strip -v --strip-unneeded" && \ + mkdir -p "${ARKIME_DIR}"/plugins && \ + curl -fsSL -o "${ARKIME_DIR}/plugins/ja4plus.${DEBARCH}.so" "https://github.com/arkime/arkime/releases/download/${ARKIME_VERSION}/ja4plus.${DEBARCH}.so" && \ + chmod 755 "${ARKIME_DIR}/plugins/ja4plus.${DEBARCH}.so" FROM debian:12-slim diff --git a/Dockerfiles/dashboards-helper.Dockerfile b/Dockerfiles/dashboards-helper.Dockerfile index 530d585e0..64b2006a4 100644 --- a/Dockerfiles/dashboards-helper.Dockerfile +++ b/Dockerfiles/dashboards-helper.Dockerfile @@ -61,7 +61,7 @@ ADD scripts/malcolm_utils.py /data/ RUN apk update --no-cache && \ apk upgrade --no-cache && \ - apk --no-cache add bash python3 py3-pip curl openssl procps psmisc npm rsync shadow jq tini && \ + apk --no-cache add bash python3 py3-pip curl openssl procps psmisc moreutils npm rsync shadow jq tini && \ npm install -g http-server && \ pip3 install --break-system-packages 
supervisor humanfriendly requests && \ curl -fsSLO "$SUPERCRONIC_URL" && \ @@ -95,7 +95,7 @@ RUN apk update --no-cache && \ /opt/templates && \ chmod 755 /data/*.sh /data/*.py /data/init && \ chmod 400 /opt/maps/* && \ - (echo -e "*/2 * * * * /data/create-arkime-sessions-index.sh\n0 10 * * * /data/index-refresh.py --index MALCOLM_NETWORK_INDEX_PATTERN --template malcolm_template --unassigned\n30 */2 * * * /data/index-refresh.py --index MALCOLM_OTHER_INDEX_PATTERN --template malcolm_beats_template --unassigned\n*/20 * * * * /data/opensearch_index_size_prune.py" > ${SUPERCRONIC_CRONTAB}) + (echo -e "*/2 * * * * /data/shared-object-creation.sh\n0 10 * * * /data/index-refresh.py --index MALCOLM_NETWORK_INDEX_PATTERN --template malcolm_template --unassigned\n30 */2 * * * /data/index-refresh.py --index MALCOLM_OTHER_INDEX_PATTERN --template malcolm_beats_template --unassigned\n*/20 * * * * /data/opensearch_index_size_prune.py" > ${SUPERCRONIC_CRONTAB}) EXPOSE $OFFLINE_REGION_MAPS_PORT diff --git a/Dockerfiles/dashboards.Dockerfile b/Dockerfiles/dashboards.Dockerfile index 0ff60b940..ea38a026e 100644 --- a/Dockerfiles/dashboards.Dockerfile +++ b/Dockerfiles/dashboards.Dockerfile @@ -1,4 +1,4 @@ -FROM opensearchproject/opensearch-dashboards:2.12.0 +FROM opensearchproject/opensearch-dashboards:2.13.0 LABEL maintainer="malcolm@inl.gov" LABEL org.opencontainers.image.authors='malcolm@inl.gov' @@ -20,7 +20,7 @@ ENV PUSER_PRIV_DROP true ENV TERM xterm ENV TINI_VERSION v0.19.0 -ENV OSD_TRANSFORM_VIS_VERSION 2.12.0 +ENV OSD_TRANSFORM_VIS_VERSION 2.13.0 ARG NODE_OPTIONS="--max_old_space_size=4096" ENV NODE_OPTIONS $NODE_OPTIONS @@ -40,8 +40,8 @@ RUN yum upgrade -y && \ /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin remove securityDashboards --allow-root && \ cd /tmp && \ # unzip transformVis.zip opensearch-dashboards/transformVis/opensearch_dashboards.json opensearch-dashboards/transformVis/package.json && \ - # sed -i "s/2\.12\.0/2\.12\.0/g" opensearch-dashboards/transformVis/opensearch_dashboards.json && \ - # sed -i "s/2\.12\.0/2\.12\.0/g" opensearch-dashboards/transformVis/package.json && \ + # sed -i "s/2\.12\.0/2\.13\.0/g" opensearch-dashboards/transformVis/opensearch_dashboards.json && \ + # sed -i "s/2\.12\.0/2\.13\.0/g" opensearch-dashboards/transformVis/package.json && \ # zip transformVis.zip opensearch-dashboards/transformVis/opensearch_dashboards.json opensearch-dashboards/transformVis/package.json && \ cd /usr/share/opensearch-dashboards/plugins && \ /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin install file:///tmp/transformVis.zip --allow-root && \ diff --git a/Dockerfiles/file-monitor.Dockerfile b/Dockerfiles/file-monitor.Dockerfile index f3992d895..886969d5a 100644 --- a/Dockerfiles/file-monitor.Dockerfile +++ b/Dockerfiles/file-monitor.Dockerfile @@ -34,6 +34,9 @@ ARG EXTRACTED_FILE_SCANNER_START_SLEEP=10 ARG EXTRACTED_FILE_LOGGER_START_SLEEP=5 ARG EXTRACTED_FILE_MIN_BYTES=64 ARG EXTRACTED_FILE_MAX_BYTES=134217728 +ARG EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE=1TB +ARG EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT=0 +ARG EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS=300 ARG VTOT_API2_KEY=0 ARG VTOT_REQUESTS_PER_MINUTE=4 ARG EXTRACTED_FILE_ENABLE_CLAMAV=false @@ -65,6 +68,9 @@ ENV EXTRACTED_FILE_SCANNER_START_SLEEP $EXTRACTED_FILE_SCANNER_START_SLEEP ENV EXTRACTED_FILE_LOGGER_START_SLEEP $EXTRACTED_FILE_LOGGER_START_SLEEP ENV EXTRACTED_FILE_MIN_BYTES $EXTRACTED_FILE_MIN_BYTES ENV EXTRACTED_FILE_MAX_BYTES $EXTRACTED_FILE_MAX_BYTES +ENV 
EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE $EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE +ENV EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT $EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT +ENV EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS $EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS ENV VTOT_API2_KEY $VTOT_API2_KEY ENV VTOT_REQUESTS_PER_MINUTE $VTOT_REQUESTS_PER_MINUTE ENV EXTRACTED_FILE_ENABLE_CLAMAV $EXTRACTED_FILE_ENABLE_CLAMAV @@ -103,6 +109,11 @@ ENV SUPERCRONIC_SHA1SUM "cd48d45c4b10f3f0bfdd3a57d054cd05ac96812b" ENV SUPERCRONIC_CRONTAB "/etc/crontab" COPY --chmod=755 shared/bin/yara_rules_setup.sh /usr/local/bin/ +ADD nginx/landingpage/css "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css" +ADD nginx/landingpage/js "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/js" +ADD --chmod=644 docs/images/logo/Malcolm_background.png "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/assets/img/bg-masthead.png" +COPY --chmod=644 docs/images/icon/favicon.ico "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/favicon.ico" +COPY --chmod=755 shared/bin/web-ui-asset-download.sh /usr/local/bin/ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sources && \ apt-get -q update && \ @@ -129,7 +140,7 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour pkg-config \ tini \ unzip && \ - apt-get -y -q install \ + apt-get -y -q install \ inotify-tools \ libzmq5 \ psmisc \ @@ -143,6 +154,7 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour python3 -m pip install --break-system-packages --no-compile --no-cache-dir \ clamd \ dominate \ + humanfriendly \ psutil \ pycryptodome \ python-magic \ @@ -170,6 +182,8 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour rm -rf "${SRC_BASE_DIR}"/yara* && \ cd "${YARA_RULES_SRC_DIR}" && \ /usr/local/bin/yara_rules_setup.sh -r "${YARA_RULES_SRC_DIR}" -y "${YARA_RULES_DIR}" && \ + cd /tmp && \ + /usr/local/bin/web-ui-asset-download.sh -o "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css" && \ cd /tmp && \ curl -fsSL -o ./capa.zip "${CAPA_URL}" && \ unzip ./capa.zip && \ @@ -190,9 +204,6 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour libtool \ make \ python3-dev && \ - apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages autoremove && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* && \ mkdir -p /var/log/clamav "${CLAMAV_RULES_DIR}" && \ groupadd --gid ${DEFAULT_GID} ${PGROUP} && \ useradd -m --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ @@ -214,7 +225,10 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour ln -r -s /usr/local/bin/zeek_carve_scanner.py /usr/local/bin/clam_scan.py && \ ln -r -s /usr/local/bin/zeek_carve_scanner.py /usr/local/bin/yara_scan.py && \ ln -r -s /usr/local/bin/zeek_carve_scanner.py /usr/local/bin/capa_scan.py && \ - echo "0 */6 * * * /bin/bash /usr/local/bin/capa-update.sh\n0 */6 * * * /usr/local/bin/yara_rules_setup.sh -r \"${YARA_RULES_SRC_DIR}\" -y \"${YARA_RULES_DIR}\"" > ${SUPERCRONIC_CRONTAB} + echo "0 */6 * * * /bin/bash /usr/local/bin/capa-update.sh\n0 */6 * * * /usr/local/bin/yara_rules_setup.sh -r \"${YARA_RULES_SRC_DIR}\" -y \"${YARA_RULES_DIR}\"" > ${SUPERCRONIC_CRONTAB} && \ + apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages autoremove && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* USER ${PUSER} @@ -222,23 +236,11 @@ RUN /usr/bin/freshclam freshclam 
--config-file=/etc/clamav/freshclam.conf USER root -ADD nginx/landingpage/css "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css" -ADD nginx/landingpage/js "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/js" -ADD --chmod=644 docs/images/logo/Malcolm_background.png "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/assets/img/bg-masthead.png" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6u_w4BMUTPHjxsI9w2_Gwfo.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6u8w4BMUTPHjxsAXC-v.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6u_w4BMUTPHjxsI5wq_Gwfo.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6u9w4BMUTPHh7USSwiPHA.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6uyw4BMUTPHjx4wWw.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 https://fonts.gstatic.com/s/lato/v24/S6u9w4BMUTPHh6UVSwiPHA.ttf "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/" -ADD --chmod=644 'https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/fonts/bootstrap-icons.woff2?856008caa5eb66df68595e734e59580d' "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/bootstrap-icons.woff2" -ADD --chmod=644 'https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/fonts/bootstrap-icons.woff?856008caa5eb66df68595e734e59580d' "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/css/bootstrap-icons.woff" - -COPY --chmod=644 docs/images/icon/favicon.ico "${EXTRACTED_FILE_HTTP_SERVER_ASSETS_DIR}/favicon.ico" COPY --chmod=755 shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ +COPY --chmod=755 shared/bin/prune_files.sh /usr/local/bin/ COPY --chmod=755 shared/bin/service_check_passthrough.sh /usr/local/bin/ COPY --chmod=755 shared/bin/zeek_carve*.py /usr/local/bin/ -COPY --chmod=755 file-monitor/scripts/*.py /usr/local/bin/ +COPY --chmod=755 shared/bin/extracted_files_http_server.py /usr/local/bin/ COPY --chmod=644 shared/bin/watch_common.py /usr/local/bin/ COPY --chmod=644 scripts/malcolm_utils.py /usr/local/bin/ COPY --chmod=644 file-monitor/supervisord.conf /etc/supervisord.conf diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile index 2dc7b2ea0..39b981775 100644 --- a/Dockerfiles/filebeat.Dockerfile +++ b/Dockerfiles/filebeat.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/beats/filebeat-oss:8.12.1 +FROM docker.elastic.co/beats/filebeat-oss:8.13.2 # Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index 0cf75e329..fa0f30a4a 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/logstash/logstash-oss:8.12.1 +FROM docker.elastic.co/logstash/logstash-oss:8.13.2 LABEL maintainer="malcolm@inl.gov" LABEL org.opencontainers.image.authors='malcolm@inl.gov' diff --git a/Dockerfiles/opensearch.Dockerfile b/Dockerfiles/opensearch.Dockerfile index 893a2debe..0831627a2 100644 --- a/Dockerfiles/opensearch.Dockerfile +++ b/Dockerfiles/opensearch.Dockerfile @@ -1,4 +1,4 @@ -FROM opensearchproject/opensearch:2.12.0 +FROM opensearchproject/opensearch:2.13.0 # Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. 
LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/zeek.Dockerfile b/Dockerfiles/zeek.Dockerfile index 1ebaf7249..cab1048fe 100644 --- a/Dockerfiles/zeek.Dockerfile +++ b/Dockerfiles/zeek.Dockerfile @@ -31,12 +31,6 @@ ENV PGROUP "zeeker" # a final check in docker_entrypoint.sh before startup ENV PUSER_PRIV_DROP false -ENV SUPERCRONIC_VERSION "0.2.29" -ENV SUPERCRONIC_URL "https://github.com/aptible/supercronic/releases/download/v$SUPERCRONIC_VERSION/supercronic-linux-amd64" -ENV SUPERCRONIC "supercronic-linux-amd64" -ENV SUPERCRONIC_SHA1SUM "cd48d45c4b10f3f0bfdd3a57d054cd05ac96812b" -ENV SUPERCRONIC_CRONTAB "/etc/crontab" - # for download and install ARG ZEEK_VERSION=6.2.0-0 ENV ZEEK_VERSION $ZEEK_VERSION @@ -53,6 +47,12 @@ ENV CCACHE_COMPRESS 1 ADD shared/bin/zeek-deb-download.sh /usr/local/bin/ ADD shared/bin/zeek_install_plugins.sh /usr/local/bin/ +ENV SUPERCRONIC_VERSION "0.2.29" +ENV SUPERCRONIC_URL "https://github.com/aptible/supercronic/releases/download/v$SUPERCRONIC_VERSION/supercronic-linux-amd64" +ENV SUPERCRONIC "supercronic-linux-amd64" +ENV SUPERCRONIC_SHA1SUM "cd48d45c4b10f3f0bfdd3a57d054cd05ac96812b" +ENV SUPERCRONIC_CRONTAB "${ZEEK_DIR}/crontab" + # build and install system packages, zeek, spicy and plugins RUN export DEBARCH=$(dpkg --print-architecture) && \ apt-get -q update && \ @@ -190,13 +190,14 @@ RUN mkdir -p /tmp/logs && \ ARG AUTO_TAG=true #Whether or not to start up the pcap_processor script to monitor pcaps ARG ZEEK_PCAP_PROCESSOR=true -#Whether or not to start up supercronic for updating intel definitions -ARG ZEEK_CRON=true #Whether or not to run "zeek -r XXXXX.pcap local" on each pcap file ARG ZEEK_AUTO_ANALYZE_PCAP_FILES=false ARG ZEEK_AUTO_ANALYZE_PCAP_THREADS=1 -ARG ZEEK_INTEL_ITEM_EXPIRATION=-1min +#Whether or not to refresh intel at various points during processing +ARG ZEEK_INTEL_REFRESH_ON_ENTRYPOINT=false +ARG ZEEK_INTEL_REFRESH_ON_DEPLOY=false ARG ZEEK_INTEL_REFRESH_CRON_EXPRESSION= +ARG ZEEK_INTEL_ITEM_EXPIRATION=-1min ARG ZEEK_INTEL_REFRESH_THREADS=2 ARG ZEEK_INTEL_FEED_SINCE= ARG ZEEK_INTEL_FEED_SSL_CERTIFICATE_VERIFICATION=false @@ -216,11 +217,12 @@ ARG PCAP_NODE_NAME=malcolm ENV AUTO_TAG $AUTO_TAG ENV ZEEK_PCAP_PROCESSOR $ZEEK_PCAP_PROCESSOR -ENV ZEEK_CRON $ZEEK_CRON +ENV ZEEK_INTEL_REFRESH_ON_ENTRYPOINT $ZEEK_INTEL_REFRESH_ON_ENTRYPOINT +ENV ZEEK_INTEL_REFRESH_ON_DEPLOY $ZEEK_INTEL_REFRESH_ON_DEPLOY +ENV ZEEK_INTEL_REFRESH_CRON_EXPRESSION $ZEEK_INTEL_REFRESH_CRON_EXPRESSION ENV ZEEK_AUTO_ANALYZE_PCAP_FILES $ZEEK_AUTO_ANALYZE_PCAP_FILES ENV ZEEK_AUTO_ANALYZE_PCAP_THREADS $ZEEK_AUTO_ANALYZE_PCAP_THREADS ENV ZEEK_INTEL_ITEM_EXPIRATION $ZEEK_INTEL_ITEM_EXPIRATION -ENV ZEEK_INTEL_REFRESH_CRON_EXPRESSION $ZEEK_INTEL_REFRESH_CRON_EXPRESSION ENV ZEEK_INTEL_REFRESH_THREADS $ZEEK_INTEL_REFRESH_THREADS ENV ZEEK_INTEL_FEED_SINCE $ZEEK_INTEL_FEED_SINCE ENV ZEEK_INTEL_FEED_SSL_CERTIFICATE_VERIFICATION $ZEEK_INTEL_FEED_SSL_CERTIFICATE_VERIFICATION diff --git a/api/requirements.txt b/api/requirements.txt index 564b1d8f8..8b8148310 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -1,9 +1,9 @@ pytz==2021.3 Flask==2.3.2 -gunicorn==20.1.0 +gunicorn==22.0.0 opensearch-py==2.5.0 requests==2.31.0 regex==2022.3.2 dateparser==1.1.1 -elasticsearch==8.12.1 -elasticsearch-dsl==8.12.0 \ No newline at end of file +elasticsearch==8.13.0 +elasticsearch-dsl==8.13.0 \ No newline at end of file diff --git a/arkime/arkime_regression_test_harness/docker-compose.yml b/arkime/arkime_regression_test_harness/docker-compose.yml index 8bd2d5f5b..bd623f32e 100644
--- a/arkime/arkime_regression_test_harness/docker-compose.yml +++ b/arkime/arkime_regression_test_harness/docker-compose.yml @@ -1,5 +1,3 @@ -version: '3.7' - services: opensearch: image: opensearchproject/opensearch:1.0.0 diff --git a/arkime/etc/config.ini b/arkime/etc/config.ini index bb09362af..9e7bafee6 100644 --- a/arkime/etc/config.ini +++ b/arkime/etc/config.ini @@ -46,6 +46,7 @@ passwordSecret=Malcolm pcapDir=/data/pcap/processed plugins=wise.so pluginsDir=/opt/arkime/plugins +queryAllIndices=false queryExtraIndices= readTruncatedPackets=true reqBodyOnlyUtf8=true @@ -514,6 +515,7 @@ zeek.files.md5=db:zeek.files.md5;group:zeek_files;kind:termfield;viewerOnly:true zeek.files.sha1=db:zeek.files.sha1;group:zeek_files;kind:termfield;viewerOnly:true;friendly:SHA1 Digest;help:SHA1 Digest zeek.files.sha256=db:zeek.files.sha256;group:zeek_files;kind:termfield;viewerOnly:true;friendly:SHA256 Digest;help:SHA256 Digest zeek.files.extracted=db:zeek.files.extracted;group:zeek_files;kind:termfield;viewerOnly:true;friendly:Extracted Filename;help:Extracted Filename +zeek.files.extracted_uri=db:zeek.files.extracted_uri;group:zeek_files;kind:termfield;viewerOnly:true;friendly:Extracted Filename URL;help:Extracted File URL zeek.files.extracted_cutoff=db:zeek.files.extracted_cutoff;group:zeek_files;kind:termfield;viewerOnly:true;friendly:Truncated;help:Truncated zeek.files.extracted_size=db:zeek.files.extracted_size;group:zeek_files;kind:integer;viewerOnly:true;friendly:Extracted Bytes;help:Extracted Bytes @@ -2661,7 +2663,7 @@ o_zeek_ecat_log_address=require:zeek.ecat_log_address;title:Zeek ecat_log_addres o_zeek_ecat_registers=require:zeek.ecat_registers;title:Zeek ecat_registers.log;fields:zeek.ecat_registers.command,zeek.ecat_registers.server_addr,zeek.ecat_registers.register_type,zeek.ecat_registers.register_addr,zeek.ecat_registers.data o_zeek_ecat_soe_info=require:zeek.ecat_soe_info;title:Zeek ecat_soe_info.log;fields:zeek.ecat_soe_info.opcode,zeek.ecat_soe_info.incomplete,zeek.ecat_soe_info.error,zeek.ecat_soe_info.drive_num,zeek.ecat_soe_info.element,zeek.ecat_soe_info.index o_zeek_enip=require:zeek.enip;title:Zeek enip.log;fields:zeek.enip.enip_command,zeek.enip.enip_command_code,zeek.enip.length,zeek.enip.session_handle,zeek.enip.enip_status,zeek.enip.sender_context,zeek.enip.options -o_zeek_files=require:zeek.files;title:Zeek files.log;fields:zeek.files.tx_hosts,zeek.files.rx_hosts,zeek.files.conn_uids,zeek.files.source,zeek.files.depth,zeek.files.analyzers,zeek.files.mime_type,zeek.files.filename,zeek.files.ftime,zeek.files.duration,zeek.files.local_orig,zeek.files.seen_bytes,zeek.files.total_bytes,zeek.files.missing_bytes,zeek.files.overflow_bytes,zeek.files.timedout,zeek.files.parent_fuid,zeek.files.md5,zeek.files.sha1,zeek.files.sha256,zeek.files.extracted,zeek.files.extracted_cutoff,zeek.files.extracted_size +o_zeek_files=require:zeek.files;title:Zeek files.log;fields:zeek.files.tx_hosts,zeek.files.rx_hosts,zeek.files.conn_uids,zeek.files.source,zeek.files.depth,zeek.files.analyzers,zeek.files.mime_type,zeek.files.filename,zeek.files.ftime,zeek.files.duration,zeek.files.local_orig,zeek.files.seen_bytes,zeek.files.total_bytes,zeek.files.missing_bytes,zeek.files.overflow_bytes,zeek.files.timedout,zeek.files.parent_fuid,zeek.files.md5,zeek.files.sha1,zeek.files.sha256,zeek.files.extracted,zeek.files.extracted_uri,zeek.files.extracted_cutoff,zeek.files.extracted_size o_zeek_ftp=require:zeek.ftp;title:Zeek 
ftp.log;fields:zeek.ftp.command,zeek.ftp.arg,zeek.ftp.mime_type,zeek.ftp.file_size,zeek.ftp.reply_code,zeek.ftp.reply_msg,zeek.ftp.data_channel.passive,zeek.ftp.data_channel.orig_h,zeek.ftp.data_channel.resp_h,zeek.ftp.data_channel.resp_p o_zeek_genisys=require:zeek.genisys;title:Zeek genisys.log;fields:zeek.genisys.header,zeek.genisys.server,zeek.genisys.direction,zeek.genisys.crc_transmitted,zeek.genisys.crc_calculated,zeek.genisys.payload.address,zeek.genisys.payload.data o_zeek_gquic=require:zeek.gquic;title:Zeek gquic.log;fields:zeek.gquic.version,zeek.gquic.server_name,zeek.gquic.user_agent,zeek.gquic.tag_count,zeek.gquic.cyu,zeek.gquic.cyutags diff --git a/arkime/scripts/docker_entrypoint.sh b/arkime/scripts/docker_entrypoint.sh index 636447bb0..66b6aade4 100755 --- a/arkime/scripts/docker_entrypoint.sh +++ b/arkime/scripts/docker_entrypoint.sh @@ -14,6 +14,7 @@ ARKIME_CONFIG_FILE="${ARKIME_DIR}"/etc/config.ini ARKIME_PASSWORD_SECRET=${ARKIME_PASSWORD_SECRET:-"Malcolm"} ARKIME_FREESPACEG=${ARKIME_FREESPACEG:-"10%"} ARKIME_ROTATE_INDEX=${ARKIME_ROTATE_INDEX:-"daily"} +ARKIME_QUERY_ALL_INDICES=${ARKIME_QUERY_ALL_INDICES:-"false"} MALCOLM_NETWORK_INDEX_PATTERN=${MALCOLM_NETWORK_INDEX_PATTERN:-} ARKIME_DEBUG_LEVEL=${ARKIME_DEBUG_LEVEL:-0} CAPTURE_INTERFACE=${PCAP_IFACE:-} @@ -62,6 +63,7 @@ if [[ ! -f "${ARKIME_CONFIG_FILE}" ]] && [[ -r "${ARKIME_DIR}"/etc/config.orig.i sed -i "s/^\(passwordSecret=\).*/\1"${ARKIME_PASSWORD_SECRET}"/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(freeSpaceG=\).*/\1"${ARKIME_FREESPACEG}"/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(rotateIndex=\).*/\1"${ARKIME_ROTATE_INDEX}"/" "${ARKIME_CONFIG_FILE}" + sed -i "s/^\(queryAllIndices=\).*/\1"${ARKIME_QUERY_ALL_INDICES}"/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(queryExtraIndices=\).*/\1"${MALCOLM_NETWORK_INDEX_PATTERN}"/" "${MALCOLM_NETWORK_INDEX_PATTERN}" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(debug=\).*/\1"${ARKIME_DEBUG_LEVEL}"/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(viewPort=\).*/\1"${VIEWER_PORT}"/" "${ARKIME_CONFIG_FILE}" @@ -151,11 +153,26 @@ if [[ ! 
-f "${ARKIME_CONFIG_FILE}" ]] && [[ -r "${ARKIME_DIR}"/etc/config.orig.i sed -i "s/^\(userAutoCreateTmpl=\)/# \1/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(wiseHost=\)/# \1/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(wisePort=\)/# \1/" "${ARKIME_CONFIG_FILE}" - sed -i "s/^\(plugins=\)/# \1/" "${ARKIME_CONFIG_FILE}" + sed -i "s/^\(plugins=\).*/# \1/" "${ARKIME_CONFIG_FILE}" sed -i "s/^\(viewerPlugins=\)/# \1/" "${ARKIME_CONFIG_FILE}" sed -i '/^\[custom-fields\]/,$d' "${ARKIME_CONFIG_FILE}" fi + # enable ja4+ plugin if it's present + JA4_PLUGIN_FILE="${ARKIME_DIR}/plugins/ja4plus.$(dpkg --print-architecture).so" + if [[ -f "${JA4_PLUGIN_FILE}" ]]; then + JA4_PLUGIN_FILE_BASE="$(basename "${JA4_PLUGIN_FILE}")" + JA4_PLUGIN_FILE_ESCAPED="$(echo "${JA4_PLUGIN_FILE_BASE}" | sed 's@\.@\\\.@g')" + # clean up old references to the plugin + sed -i "/plugins=.*${JA4_PLUGIN_FILE_ESCAPED}/s/;\?${JA4_PLUGIN_FILE_ESCAPED}//g" "${ARKIME_CONFIG_FILE}" + # append ja4 plugin filename to end of plugins= line in config file and uncomment it if necessary + sed -i "s/^#*[[:space:]]*\(plugins=\)/\1${JA4_PLUGIN_FILE_BASE};/" "${ARKIME_CONFIG_FILE}" + # squash semicolons + sed -i 's/;\{2,\}/;/g' "${ARKIME_CONFIG_FILE}" + # remove trailing semicolon from plugins= line if it exists + sed -i "s/^\(plugins=.*\)[[:space:]]*;[[:space:]]*$/\1/" "${ARKIME_CONFIG_FILE}" + fi + chmod 600 "${ARKIME_CONFIG_FILE}" || true [[ -n ${PUID} ]] && chown -f ${PUID} "${ARKIME_CONFIG_FILE}" || true [[ -n ${PGID} ]] && chown -f :${PGID} "${ARKIME_CONFIG_FILE}" || true diff --git a/arkime/wise/source.zeeklogs.js b/arkime/wise/source.zeeklogs.js index 85d50975e..b92557cc7 100644 --- a/arkime/wise/source.zeeklogs.js +++ b/arkime/wise/source.zeeklogs.js @@ -966,6 +966,7 @@ class MalcolmSource extends WISESource { "zeek.files.extracted", "zeek.files.extracted_cutoff", "zeek.files.extracted_size", + "zeek.files.extracted_uri", "zeek.files.filename", "zeek.files.ftime", "zeek.files.local_orig", @@ -2253,8 +2254,10 @@ class MalcolmSource extends WISESource { this.api.addValueAction("malcolm_websearch_mime", { name: "Media Type Registry", url: 'https://www.iana.org/assignments/media-types/%TEXT%', fields: mimeFieldsStr }); // add right-click for extracted files from zeek - var carvedFieldsStr = allFields.filter(value => /^zeek\.files\.extracted$/i.test(value)).join(','); - this.api.addValueAction("malcolm_carved_file_quarantined", { name: "Download", url: "/extracted-files/%TEXT%", fields: carvedFieldsStr }); + // var carvedFieldsStr = allFields.filter(value => /^zeek\.files\.extracted$/i.test(value)).join(','); + // this.api.addValueAction("malcolm_carved_file", { name: "Download", url: "/extracted-files/%TEXT%", fields: carvedFieldsStr }); + var carvedFieldsUrlStr = allFields.filter(value => /^zeek\.files\.extracted_uri$/i.test(value)).join(','); + this.api.addValueAction("malcolm_carved_file_url", { name: "Download", url: "/%TEXT%", fields: carvedFieldsUrlStr }); // add right-clicks for pivoting into dashboards from Arkime (see nginx.conf) var filterLabel = "OpenSearch Dashboards %DBFIELD%"; @@ -2266,11 +2269,6 @@ class MalcolmSource extends WISESource { var apiURL = "/mapi/agg/%DBFIELD%?from=%ISOSTART%&to=%ISOSTOP%"; this.api.addFieldAction("malcolm_mapi_fields_zeek", { name: apiLabel, url: apiURL, all: true }); - // add rick-click for extracted-files - var extractedFilesLabel = "Browse Extracted Files"; - var extractedFilesURL = "/extracted-files/"; - this.api.addFieldAction("malcolm_mapi_field_extracted_files", { name: extractedFilesLabel, url: 
extractedFilesURL, fields: carvedFieldsStr }); - // add right-click for viewing original JSON document this.api.addValueAction("malcolm_json_source", { name: "%DBFIELD% Document(s) JSON", url: "/mapi/document?filter={\"%DBFIELD%\":\"%TEXT%\"}", fields: "communityId,event.id,id,network.community_id,rootId,zeek.fuid,zeek.uid" }); diff --git a/config/arkime.env.example b/config/arkime.env.example index 04423e865..512842cdd 100644 --- a/config/arkime.env.example +++ b/config/arkime.env.example @@ -6,6 +6,9 @@ ARKIME_FREESPACEG=10% # How often to create a new index in OpenSearch/Elasticsearch # https://arkime.com/settings#rotateIndex ARKIME_ROTATE_INDEX=daily +# Always query all indices instead of trying to calculate which ones +# https://arkime.com/settings#queryAllIndices +ARKIME_QUERY_ALL_INDICES=false # debug flag for config.ini (https://arkime.com/settings#debug) ARKIME_DEBUG_LEVEL=0 diff --git a/config/dashboards-helper.env.example b/config/dashboards-helper.env.example index 268e653e1..98f46f18b 100644 --- a/config/dashboards-helper.env.example +++ b/config/dashboards-helper.env.example @@ -1,5 +1,7 @@ # Whether or not to set OpenSearch Dashboards to dark mode DASHBOARDS_DARKMODE=true +# A prefix to prepend to the titles of imported Malcolm dashboards +DASHBOARDS_PREFIX= # The maximum cumulative size of OpenSearch indices containing network traffic metadata # before which the oldest indices will be deleted ('' to disable storage-based index pruning). OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT=0 diff --git a/config/logstash.env.example b/config/logstash.env.example index afc5acd4b..3db494a47 100644 --- a/config/logstash.env.example +++ b/config/logstash.env.example @@ -10,8 +10,8 @@ LOGSTASH_SEVERITY_SCORING=true # Whether or not Logstash will perform a reverse DNS lookup for external IP addresses LOGSTASH_REVERSE_DNS=false # Which types of logs will be enriched via NetBox (comma-separated list of provider.dataset, or the string all to enrich all logs) -LOGSTASH_NETBOX_ENRICHMENT_DATASETS=suricata.alert,zeek.conn,zeek.known_hosts,zeek.known_services,zeek.notice,zeek.signatures,zeek.software,zeek.weird +LOGSTASH_NETBOX_ENRICHMENT_DATASETS=suricata.alert,zeek.conn,zeek.dhcp,zeek.dns,zeek.known_hosts,zeek.known_services,zeek.ntlm,zeek.notice,zeek.signatures,zeek.software,zeek.weird # Zeek log types that will be ignored (dropped) by LogStash -LOGSTASH_ZEEK_IGNORED_LOGS=analyzer,broker,capture_loss,cluster,config,loaded_scripts,packet_filter,png,print,prof,reporter,stats,stderr,stdout +LOGSTASH_ZEEK_IGNORED_LOGS=analyzer,broker,cluster,config,loaded_scripts,packet_filter,png,print,prof,reporter,stderr,stdout # Logstash memory allowance and other Java options LS_JAVA_OPTS=-server -Xmx2500m -Xms2500m -Xss1536k -XX:-HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/./urandom -Dlog4j.formatMsgNoLookups=true \ No newline at end of file diff --git a/config/zeek-live.env.example b/config/zeek-live.env.example index abf8c7a4d..7a233810e 100644 --- a/config/zeek-live.env.example +++ b/config/zeek-live.env.example @@ -5,6 +5,5 @@ ZEEK_LIVE_CAPTURE=false ZEEK_DISABLE_STATS=true ZEEK_PCAP_PROCESSOR=false -ZEEK_CRON=true ZEEK_LOG_PATH=/zeek/live EXTRACT_FILES_PATH=/zeek/extract_files \ No newline at end of file diff --git a/config/zeek-offline.env.example b/config/zeek-offline.env.example index f57f536b0..75c2e130a 100644 --- a/config/zeek-offline.env.example +++ b/config/zeek-offline.env.example @@ -10,4 +10,10 @@ ZEEK_AUTO_ANALYZE_PCAP_THREADS=1 ZEEK_ROTATED_PCAP=true ZEEK_PCAP_PROCESSOR=true
-ZEEK_CRON=false \ No newline at end of file + +# Specifies whether or not to refresh Zeek Intelligence Framework files in +# the container entrypoint +ZEEK_INTEL_REFRESH_ON_ENTRYPOINT=true +# Specifies a cron expression indicating the refresh interval for generating the +# Zeek Intelligence Framework files (or blank to disable automatic refresh) +ZEEK_INTEL_REFRESH_CRON_EXPRESSION= \ No newline at end of file diff --git a/config/zeek.env.example b/config/zeek.env.example index 400362120..66a04c0c6 100644 --- a/config/zeek.env.example +++ b/config/zeek.env.example @@ -13,9 +13,6 @@ ZEEK_INTEL_ITEM_EXPIRATION=-1min ZEEK_INTEL_FEED_SINCE= # Whether or not to require SSL certificate verification when querying a TAXII or MISP feed ZEEK_INTEL_FEED_SSL_CERTIFICATE_VERIFICATION=false -# Specifies a cron expression indicating the refresh interval for generating the -# Zeek Intelligence Framework files ('' disables automatic refresh) -ZEEK_INTEL_REFRESH_CRON_EXPRESSION= # Number of threads to use for querying feeds for generating Zeek Intelligence Framework files ZEEK_INTEL_REFRESH_THREADS=2 # Determines the file extraction behavior for file transfers detected by Zeek @@ -32,6 +29,12 @@ EXTRACTED_FILE_PRESERVATION=quarantined EXTRACTED_FILE_MIN_BYTES=64 # The maximum size (in bytes) for files to be extracted by Zeek EXTRACTED_FILE_MAX_BYTES=134217728 +# Prune ./zeek-logs/extract_files/ when it exceeds this size... +EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE=1TB +# ... or when the *total* disk usage exceeds this percentage +EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT=0 +# Interval in seconds for checking whether to prune ./zeek-logs/extract_files/ +EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS=300 # Rate limiting for VirusTotal, ClamAV, YARA and capa with Zeek-extracted files VTOT_REQUESTS_PER_MINUTE=4 CLAMD_MAX_REQUESTS=8 diff --git a/dashboards/anomaly_detectors/action_result_user.json b/dashboards/anomaly_detectors/action_result_user.json index 86bf8457b..da4996f34 100644 --- a/dashboards/anomaly_detectors/action_result_user.json +++ b/dashboards/anomaly_detectors/action_result_user.json @@ -5,9 +5,6 @@ "indices": [ "MALCOLM_NETWORK_INDEX_PATTERN_REPLACER" ], - "category_field": [ - "network.protocol" - ], "feature_attributes": [ { "feature_name": "event_action", @@ -81,5 +78,10 @@ "interval": 10, "unit": "MINUTES" } - } + }, + "last_update_time": 1714421906634, + "category_field": [ + "event.action", + "event.result" + ] } \ No newline at end of file diff --git a/dashboards/anomaly_detectors/file_mime_type.json b/dashboards/anomaly_detectors/file_mime_type.json index 9f3e45afe..662f456ba 100644 --- a/dashboards/anomaly_detectors/file_mime_type.json +++ b/dashboards/anomaly_detectors/file_mime_type.json @@ -43,5 +43,9 @@ "interval": 1, "unit": "Minutes" } - } + }, + "last_update_time": 1714421906634, + "category_field": [ + "file.mime_type" + ] } diff --git a/dashboards/anomaly_detectors/network_protocol.json b/dashboards/anomaly_detectors/network_protocol.json index 770223aa9..9b0f8fd11 100644 --- a/dashboards/anomaly_detectors/network_protocol.json +++ b/dashboards/anomaly_detectors/network_protocol.json @@ -43,5 +43,9 @@ "interval": 1, "unit": "Minutes" } - } + }, + "last_update_time": 1714421906634, + "category_field": [ + "network.protocol" + ] } diff --git a/dashboards/anomaly_detectors/total_bytes.json b/dashboards/anomaly_detectors/total_bytes.json index c66a125bb..e7cbb680d 100644 --- a/dashboards/anomaly_detectors/total_bytes.json +++ b/dashboards/anomaly_detectors/total_bytes.json @@ 
-43,5 +43,10 @@ "interval": 1, "unit": "Minutes" } - } + }, + "last_update_time": 1714421906634, + "category_field": [ + "source.ip", + "destination.ip" + ] } \ No newline at end of file diff --git a/dashboards/dashboards/024062a6-48d6-498f-a91a-3bf2da3a3cd3.json b/dashboards/dashboards/024062a6-48d6-498f-a91a-3bf2da3a3cd3.json index 8c6ea75d6..8f7da041a 100644 --- a/dashboards/dashboards/024062a6-48d6-498f-a91a-3bf2da3a3cd3.json +++ b/dashboards/dashboards/024062a6-48d6-498f-a91a-3bf2da3a3cd3.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-10-12T18:27:47.478Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzg4MywxXQ==", "attributes": { "title": "X.509", @@ -608,4 +608,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/03207c00-d07e-11ec-b4a7-d1b4003706b7.json b/dashboards/dashboards/03207c00-d07e-11ec-b4a7-d1b4003706b7.json index 830b701bf..f445b4a93 100644 --- a/dashboards/dashboards/03207c00-d07e-11ec-b4a7-d1b4003706b7.json +++ b/dashboards/dashboards/03207c00-d07e-11ec-b4a7-d1b4003706b7.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-05-10T16:42:42.241Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEyMTAsMV0=", "attributes": { "title": "GENISYS", @@ -381,4 +381,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/05e3e000-f118-11e9-acda-83a8e29e1a24.json b/dashboards/dashboards/05e3e000-f118-11e9-acda-83a8e29e1a24.json index d7f067053..4c106482e 100644 --- a/dashboards/dashboards/05e3e000-f118-11e9-acda-83a8e29e1a24.json +++ b/dashboards/dashboards/05e3e000-f118-11e9-acda-83a8e29e1a24.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-05-11T13:57:03.753Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE1OTcsMV0=", "attributes": { "title": "LDAP", @@ -478,4 +478,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/078b9aa5-9bd4-4f02-ae5e-cf80fa6f887b.json b/dashboards/dashboards/078b9aa5-9bd4-4f02-ae5e-cf80fa6f887b.json index 5f58138a1..4b746f808 100644 --- a/dashboards/dashboards/078b9aa5-9bd4-4f02-ae5e-cf80fa6f887b.json +++ b/dashboards/dashboards/078b9aa5-9bd4-4f02-ae5e-cf80fa6f887b.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T15:29:57.350Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE5MDYsMV0=", "attributes": { "title": "FTP", @@ -382,4 +382,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/0a490422-0ce9-44bf-9a2d-19329ddde8c3.json b/dashboards/dashboards/0a490422-0ce9-44bf-9a2d-19329ddde8c3.json index 2a6bfa266..287971fc9 100644 --- a/dashboards/dashboards/0a490422-0ce9-44bf-9a2d-19329ddde8c3.json +++ b/dashboards/dashboards/0a490422-0ce9-44bf-9a2d-19329ddde8c3.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-11-16T21:13:35.008Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEzMzEsMV0=", "attributes": { "title": "PE", @@ -389,4 +389,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/0ad3d7c2-3441-485e-9dfe-dbb22e84e576.json b/dashboards/dashboards/0ad3d7c2-3441-485e-9dfe-dbb22e84e576.json index 75ddb6bdd..9e911124e 100644 --- a/dashboards/dashboards/0ad3d7c2-3441-485e-9dfe-dbb22e84e576.json +++ b/dashboards/dashboards/0ad3d7c2-3441-485e-9dfe-dbb22e84e576.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-05-04T20:30:33.149Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEzNjIsMV0=", "attributes": { "title": "Overview", @@ 
-467,4 +467,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/0aed0e23-c8ac-4f2b-9f68-d04b6e7666b0.json b/dashboards/dashboards/0aed0e23-c8ac-4f2b-9f68-d04b6e7666b0.json index a35d26d64..1055983d2 100644 --- a/dashboards/dashboards/0aed0e23-c8ac-4f2b-9f68-d04b6e7666b0.json +++ b/dashboards/dashboards/0aed0e23-c8ac-4f2b-9f68-d04b6e7666b0.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:10.810Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEzMSwxXQ==", "attributes": { "title": "Connections - Destination - Top Connection Duration", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/0b2354ae-0fe9-4fd9-b156-1c3870e5c7aa.json b/dashboards/dashboards/0b2354ae-0fe9-4fd9-b156-1c3870e5c7aa.json index 2a17f5301..f66753a14 100644 --- a/dashboards/dashboards/0b2354ae-0fe9-4fd9-b156-1c3870e5c7aa.json +++ b/dashboards/dashboards/0b2354ae-0fe9-4fd9-b156-1c3870e5c7aa.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:02:01.961Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzMxNDEsMV0=", "attributes": { "title": "SIP", @@ -524,4 +524,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/11be6381-beef-40a7-bdce-88c5398392fc.json b/dashboards/dashboards/11be6381-beef-40a7-bdce-88c5398392fc.json index b9a5a8126..d4e930578 100644 --- a/dashboards/dashboards/11be6381-beef-40a7-bdce-88c5398392fc.json +++ b/dashboards/dashboards/11be6381-beef-40a7-bdce-88c5398392fc.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T19:07:48.772Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM4MjQsMV0=", "attributes": { "title": "Tunnels", @@ -349,4 +349,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/11ddd980-e388-11e9-b568-cf17de8e860c.json b/dashboards/dashboards/11ddd980-e388-11e9-b568-cf17de8e860c.json index b277bfedb..87707105f 100644 --- a/dashboards/dashboards/11ddd980-e388-11e9-b568-cf17de8e860c.json +++ b/dashboards/dashboards/11ddd980-e388-11e9-b568-cf17de8e860c.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T16:02:59.762Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIzNjUsMV0=", "attributes": { "title": "QUIC", @@ -383,4 +383,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/12e3a130-d83b-11eb-a0b0-f328ce09b0b7.json b/dashboards/dashboards/12e3a130-d83b-11eb-a0b0-f328ce09b0b7.json index ebed9bfb7..4e1b49d2b 100644 --- a/dashboards/dashboards/12e3a130-d83b-11eb-a0b0-f328ce09b0b7.json +++ b/dashboards/dashboards/12e3a130-d83b-11eb-a0b0-f328ce09b0b7.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-10-25T21:21:24.534Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzkwNiwxXQ==", "attributes": { "title": "ICS Best Guess", @@ -338,4 +338,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json b/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json index 09deb82f8..4ce8a4986 100644 --- a/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json +++ b/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-11-10T19:05:19.809Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1NywxXQ==", "attributes": { "title": "Modbus", @@ -848,4 +848,4 @@ } } ] -} \ No newline at end of file +} diff --git 
a/dashboards/dashboards/1cc01ff0-5205-11ec-a62c-7bc80e88f3f0.json b/dashboards/dashboards/1cc01ff0-5205-11ec-a62c-7bc80e88f3f0.json index 81b246aa5..a5acadef3 100644 --- a/dashboards/dashboards/1cc01ff0-5205-11ec-a62c-7bc80e88f3f0.json +++ b/dashboards/dashboards/1cc01ff0-5205-11ec-a62c-7bc80e88f3f0.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-11-30T18:12:05.004Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE0MDYsMV0=", "attributes": { "title": "OSPF", @@ -419,4 +419,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/1ce42250-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/1ce42250-3f99-11e9-a58e-8bdedb0915e8.json index b8206ee94..7a8144e6b 100644 --- a/dashboards/dashboards/1ce42250-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/1ce42250-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:16.017Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIzOCwxXQ==", "attributes": { "title": "Connections - Source - Sum of Total Bytes (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/1fff49f6-0199-4a0f-820b-721aff9ff1f1.json b/dashboards/dashboards/1fff49f6-0199-4a0f-820b-721aff9ff1f1.json index 758b41493..56d07e97b 100644 --- a/dashboards/dashboards/1fff49f6-0199-4a0f-820b-721aff9ff1f1.json +++ b/dashboards/dashboards/1fff49f6-0199-4a0f-820b-721aff9ff1f1.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-05-04T17:52:19.656Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE2MSwxXQ==", "attributes": { "title": "Zeek Weird", @@ -278,4 +278,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/29a1b290-eb98-11e9-a384-0fcf32210194.json b/dashboards/dashboards/29a1b290-eb98-11e9-a384-0fcf32210194.json index 1917c8766..699d27535 100644 --- a/dashboards/dashboards/29a1b290-eb98-11e9-a384-0fcf32210194.json +++ b/dashboards/dashboards/29a1b290-eb98-11e9-a384-0fcf32210194.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-02-14T15:38:50.396Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwNDMsMV0=", "attributes": { "title": "EtherNet/IP", @@ -526,4 +526,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/2bec1490-eb94-11e9-a384-0fcf32210194.json b/dashboards/dashboards/2bec1490-eb94-11e9-a384-0fcf32210194.json index 8f9f90ff6..e0839bf26 100644 --- a/dashboards/dashboards/2bec1490-eb94-11e9-a384-0fcf32210194.json +++ b/dashboards/dashboards/2bec1490-eb94-11e9-a384-0fcf32210194.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-18T21:25:43.221Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1NiwxXQ==", "attributes": { "title": "BACnet", @@ -585,4 +585,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/2cc56240-e460-11ed-a9d5-9f591c284cb4.json b/dashboards/dashboards/2cc56240-e460-11ed-a9d5-9f591c284cb4.json index afeba696d..4a86908eb 100644 --- a/dashboards/dashboards/2cc56240-e460-11ed-a9d5-9f591c284cb4.json +++ b/dashboards/dashboards/2cc56240-e460-11ed-a9d5-9f591c284cb4.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-04-26T19:48:24.081Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1MSwxXQ==", "attributes": { "title": "Synchrophasor", @@ -638,4 +638,4 @@ } } ] -} \ No newline at end of file +} diff --git 
a/dashboards/dashboards/2cf94cd0-ecab-40a5-95a7-8419f3a39cd9.json b/dashboards/dashboards/2cf94cd0-ecab-40a5-95a7-8419f3a39cd9.json index 9283cd75d..0e7b1f874 100644 --- a/dashboards/dashboards/2cf94cd0-ecab-40a5-95a7-8419f3a39cd9.json +++ b/dashboards/dashboards/2cf94cd0-ecab-40a5-95a7-8419f3a39cd9.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-05-11T14:11:53.521Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE3OTQsMV0=", "attributes": { "title": "DNS", @@ -522,4 +522,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/2d98bb8e-214c-4374-837b-20e1bcd63a5e.json b/dashboards/dashboards/2d98bb8e-214c-4374-837b-20e1bcd63a5e.json index 403f16059..cfe29e051 100644 --- a/dashboards/dashboards/2d98bb8e-214c-4374-837b-20e1bcd63a5e.json +++ b/dashboards/dashboards/2d98bb8e-214c-4374-837b-20e1bcd63a5e.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:21.144Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzMyOCwxXQ==", "attributes": { "title": "DHCP", @@ -260,7 +260,7 @@ "version": "WzMzNSwxXQ==", "attributes": { "title": "DHCP - IP to MAC Assignment", - "visState": "{\"title\":\"DHCP - IP to MAC Assignment\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"zeek.dhcp.assigned_ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Assigned IP Address\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"zeek.dhcp.mac\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"MAC Address\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"source.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Source IP Address\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"destination.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Destination IP Address\"}}]}", + "visState": "{\"title\":\"DHCP - IP to MAC Assignment\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"zeek.dhcp.assigned_ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Assigned IP 
Address\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"source.mac\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"MAC Address\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"source.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Source IP Address\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"destination.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Destination IP Address\"}}]}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", "description": "", "version": 1, @@ -353,7 +353,7 @@ "description": "", "hits": 0, "columns": [ - "zeek.dhcp.mac", + "source.mac", "zeek.dhcp.assigned_ip", "destination.ip", "zeek.dhcp.host_name", @@ -386,4 +386,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/32587740-ef88-11e9-b38a-2db3ee640e88.json b/dashboards/dashboards/32587740-ef88-11e9-b38a-2db3ee640e88.json index 486cfa584..12fb191c4 100644 --- a/dashboards/dashboards/32587740-ef88-11e9-b38a-2db3ee640e88.json +++ b/dashboards/dashboards/32587740-ef88-11e9-b38a-2db3ee640e88.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:52:27.963Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM2NDEsMV0=", "attributes": { "title": "Tabular Data Stream - RPC", @@ -278,4 +278,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/36ed695f-edcc-47c1-b0ec-50d20c93ce0f.json b/dashboards/dashboards/36ed695f-edcc-47c1-b0ec-50d20c93ce0f.json index 379ddd8b6..2a24f8c6f 100644 --- a/dashboards/dashboards/36ed695f-edcc-47c1-b0ec-50d20c93ce0f.json +++ b/dashboards/dashboards/36ed695f-edcc-47c1-b0ec-50d20c93ce0f.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-01-12T18:32:51.293Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwMjMsMV0=", "attributes": { "title": "Zeek Intelligence", @@ -455,4 +455,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/37041ee1-79c0-4684-a436-3173b0e89876.json b/dashboards/dashboards/37041ee1-79c0-4684-a436-3173b0e89876.json index 7bcce0a43..4af306a73 100644 --- a/dashboards/dashboards/37041ee1-79c0-4684-a436-3173b0e89876.json +++ b/dashboards/dashboards/37041ee1-79c0-4684-a436-3173b0e89876.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-11-14T19:40:46.803Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1NCwxXQ==", "attributes": { "title": "HTTP", @@ -656,4 +656,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/39abfe30-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/39abfe30-3f99-11e9-a58e-8bdedb0915e8.json index 0494646a7..8ab4c8316 100644 --- a/dashboards/dashboards/39abfe30-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/39abfe30-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:25.340Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzQxNCwxXQ==", "attributes": { "title": 
"Connections - Source - Top Connection Duration (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/42e831b9-41a9-4f35-8b7d-e1566d368773.json b/dashboards/dashboards/42e831b9-41a9-4f35-8b7d-e1566d368773.json index 431a69e37..4c8648984 100644 --- a/dashboards/dashboards/42e831b9-41a9-4f35-8b7d-e1566d368773.json +++ b/dashboards/dashboards/42e831b9-41a9-4f35-8b7d-e1566d368773.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-11-12T20:12:35.920Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEyMzMsMV0=", "attributes": { "title": "SMB", @@ -531,4 +531,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/432af556-c5c0-4cc3-8166-b274b4e3a406.json b/dashboards/dashboards/432af556-c5c0-4cc3-8166-b274b4e3a406.json index d6e17e227..28a8701da 100644 --- a/dashboards/dashboards/432af556-c5c0-4cc3-8166-b274b4e3a406.json +++ b/dashboards/dashboards/432af556-c5c0-4cc3-8166-b274b4e3a406.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T15:16:14.488Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE4MjcsMV0=", "attributes": { "title": "DCE/RPC", @@ -454,4 +454,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/4a073440-b286-11eb-a4d4-09fa12a6ebd4.json b/dashboards/dashboards/4a073440-b286-11eb-a4d4-09fa12a6ebd4.json index a9c3d9f39..b3f97f28b 100644 --- a/dashboards/dashboards/4a073440-b286-11eb-a4d4-09fa12a6ebd4.json +++ b/dashboards/dashboards/4a073440-b286-11eb-a4d4-09fa12a6ebd4.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-05-11T19:19:14.565Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzE1OTcsMV0=", "attributes": { "title": "EtherCAT", @@ -352,4 +352,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/4a4bde20-4760-11ea-949c-bbb5a9feecbf.json b/dashboards/dashboards/4a4bde20-4760-11ea-949c-bbb5a9feecbf.json index 43b5060a6..3dc0fcd3c 100644 --- a/dashboards/dashboards/4a4bde20-4760-11ea-949c-bbb5a9feecbf.json +++ b/dashboards/dashboards/4a4bde20-4760-11ea-949c-bbb5a9feecbf.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:28.484Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzQ0OCwxXQ==", "attributes": { "title": "ICS/IoT Security Overview", @@ -504,4 +504,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/4e5f106e-c60a-4226-8f64-d534abb912ab.json b/dashboards/dashboards/4e5f106e-c60a-4226-8f64-d534abb912ab.json index 367b791b7..7233c114b 100644 --- a/dashboards/dashboards/4e5f106e-c60a-4226-8f64-d534abb912ab.json +++ b/dashboards/dashboards/4e5f106e-c60a-4226-8f64-d534abb912ab.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-11-14T19:36:48.975Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1MiwxXQ==", "attributes": { "title": "SNMP", @@ -377,4 +377,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/50ced171-1b10-4c3f-8b67-2db9635661a6.json b/dashboards/dashboards/50ced171-1b10-4c3f-8b67-2db9635661a6.json index 57d8d5167..393cf9b03 100644 --- a/dashboards/dashboards/50ced171-1b10-4c3f-8b67-2db9635661a6.json +++ b/dashboards/dashboards/50ced171-1b10-4c3f-8b67-2db9635661a6.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T15:59:01.107Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIzMTEsMV0=", "attributes": { "title": "MySQL", @@ -243,4 +243,4 @@ } } ] -} \ No newline 
at end of file +} diff --git a/dashboards/dashboards/543118a9-02d7-43fe-b669-b8652177fc37.json b/dashboards/dashboards/543118a9-02d7-43fe-b669-b8652177fc37.json index 18437071d..7e0fa30c3 100644 --- a/dashboards/dashboards/543118a9-02d7-43fe-b669-b8652177fc37.json +++ b/dashboards/dashboards/543118a9-02d7-43fe-b669-b8652177fc37.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T15:55:44.537Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIyNDcsMV0=", "attributes": { "title": "NTLM", @@ -456,4 +456,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/55e332d0-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/55e332d0-3f99-11e9-a58e-8bdedb0915e8.json index bb8112815..36af6edf3 100644 --- a/dashboards/dashboards/55e332d0-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/55e332d0-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:32.623Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzQ5MiwxXQ==", "attributes": { "title": "Connections - Destination - Originator Bytes (region map)", @@ -135,4 +135,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/5694ca60-cbdf-11ec-a50a-5fedd672f5c5.json b/dashboards/dashboards/5694ca60-cbdf-11ec-a50a-5fedd672f5c5.json index b7819873a..f4adf4eb5 100644 --- a/dashboards/dashboards/5694ca60-cbdf-11ec-a50a-5fedd672f5c5.json +++ b/dashboards/dashboards/5694ca60-cbdf-11ec-a50a-5fedd672f5c5.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-01-08T22:17:37.689Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1MSwxXQ==", "attributes": { "title": "Suricata Alerts", @@ -449,4 +449,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/60d78fbd-471c-4f59-a9e3-189b33a13644.json b/dashboards/dashboards/60d78fbd-471c-4f59-a9e3-189b33a13644.json index 41106a81e..3a7cd647f 100644 --- a/dashboards/dashboards/60d78fbd-471c-4f59-a9e3-189b33a13644.json +++ b/dashboards/dashboards/60d78fbd-471c-4f59-a9e3-189b33a13644.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:33.654Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzQ5NiwxXQ==", "attributes": { "title": "Connections - Destination - Sum of Total Bytes", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/665d1610-523d-11e9-a30e-e3576242f3ed.json b/dashboards/dashboards/665d1610-523d-11e9-a30e-e3576242f3ed.json index 76adbfa48..3e8662243 100644 --- a/dashboards/dashboards/665d1610-523d-11e9-a30e-e3576242f3ed.json +++ b/dashboards/dashboards/665d1610-523d-11e9-a30e-e3576242f3ed.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-05-04T18:24:09.052Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzExNTEsMV0=", "attributes": { "title": "Signatures", @@ -311,4 +311,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/677ee170-809e-11ed-8d5b-07069f823b6f.json b/dashboards/dashboards/677ee170-809e-11ed-8d5b-07069f823b6f.json index 73790187f..eed00cbbe 100644 --- a/dashboards/dashboards/677ee170-809e-11ed-8d5b-07069f823b6f.json +++ b/dashboards/dashboards/677ee170-809e-11ed-8d5b-07069f823b6f.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-11-14T20:55:46.977Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1MSwxXQ==", "attributes": { "title": "Asset Interaction Analysis", @@ -629,4 +629,4 @@ } } ] -} \ No newline 
at end of file +} diff --git a/dashboards/dashboards/76f2f912-80da-44cd-ab66-6a73c8344cc3.json b/dashboards/dashboards/76f2f912-80da-44cd-ab66-6a73c8344cc3.json index acc870ea1..86a365dd9 100644 --- a/dashboards/dashboards/76f2f912-80da-44cd-ab66-6a73c8344cc3.json +++ b/dashboards/dashboards/76f2f912-80da-44cd-ab66-6a73c8344cc3.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:36.060Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzUwOSwxXQ==", "attributes": { "title": "IRC", @@ -349,4 +349,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/77fc9960-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/77fc9960-3f99-11e9-a58e-8bdedb0915e8.json index 0086705a9..1249ff6ec 100644 --- a/dashboards/dashboards/77fc9960-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/77fc9960-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:37.074Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzUxOSwxXQ==", "attributes": { "title": "Connections - Destination - Responder Bytes (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/7f41913f-cba8-43f5-82a8-241b7ead03e0.json b/dashboards/dashboards/7f41913f-cba8-43f5-82a8-241b7ead03e0.json index 5102c24a3..b96a1c91a 100644 --- a/dashboards/dashboards/7f41913f-cba8-43f5-82a8-241b7ead03e0.json +++ b/dashboards/dashboards/7f41913f-cba8-43f5-82a8-241b7ead03e0.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T16:29:37.280Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzI4NjEsMV0=", "attributes": { "title": "RDP", @@ -421,4 +421,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/7f77b58a-df3e-4cc2-b782-fd7f8bad8ffb.json b/dashboards/dashboards/7f77b58a-df3e-4cc2-b782-fd7f8bad8ffb.json index 532bab29e..1c0850afb 100644 --- a/dashboards/dashboards/7f77b58a-df3e-4cc2-b782-fd7f8bad8ffb.json +++ b/dashboards/dashboards/7f77b58a-df3e-4cc2-b782-fd7f8bad8ffb.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-10-12T14:50:34.705Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzg4MCwxXQ==", "attributes": { "title": "SSL", @@ -708,4 +708,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/82da3101-2a9c-4ae2-bb61-d447a3fbe673.json b/dashboards/dashboards/82da3101-2a9c-4ae2-bb61-d447a3fbe673.json index b53c6fb58..07761f046 100644 --- a/dashboards/dashboards/82da3101-2a9c-4ae2-bb61-d447a3fbe673.json +++ b/dashboards/dashboards/82da3101-2a9c-4ae2-bb61-d447a3fbe673.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T15:46:19.291Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIxMjUsMV0=", "attributes": { "title": "Kerberos", @@ -524,4 +524,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/870a5862-6c26-4a08-99fd-0c06cda85ba3.json b/dashboards/dashboards/870a5862-6c26-4a08-99fd-0c06cda85ba3.json index 1e4e7d37c..9dc23a03f 100644 --- a/dashboards/dashboards/870a5862-6c26-4a08-99fd-0c06cda85ba3.json +++ b/dashboards/dashboards/870a5862-6c26-4a08-99fd-0c06cda85ba3.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:41.140Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzU3NCwxXQ==", "attributes": { "title": "DNP3", @@ -512,4 +512,4 @@ } } ] -} \ No newline at end of file +} diff --git 
a/dashboards/dashboards/87a32f90-ef58-11e9-974e-9d600036d105.json b/dashboards/dashboards/87a32f90-ef58-11e9-974e-9d600036d105.json index 2f145ac22..bb9f14d4a 100644 --- a/dashboards/dashboards/87a32f90-ef58-11e9-974e-9d600036d105.json +++ b/dashboards/dashboards/87a32f90-ef58-11e9-974e-9d600036d105.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:42.154Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzU4OCwxXQ==", "attributes": { "title": "MQTT", @@ -546,4 +546,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/87d990cc-9e0b-41e5-b8fe-b10ae1da0c85.json b/dashboards/dashboards/87d990cc-9e0b-41e5-b8fe-b10ae1da0c85.json index 92073bf69..216a106ac 100644 --- a/dashboards/dashboards/87d990cc-9e0b-41e5-b8fe-b10ae1da0c85.json +++ b/dashboards/dashboards/87d990cc-9e0b-41e5-b8fe-b10ae1da0c85.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:43.189Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzYwMywxXQ==", "attributes": { "title": "Software", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/89d1cc50-974c-11ed-bb6b-3fb06c879b11.json b/dashboards/dashboards/89d1cc50-974c-11ed-bb6b-3fb06c879b11.json index 99ba19a25..cb87d474d 100644 --- a/dashboards/dashboards/89d1cc50-974c-11ed-bb6b-3fb06c879b11.json +++ b/dashboards/dashboards/89d1cc50-974c-11ed-bb6b-3fb06c879b11.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-01-20T16:56:59.255Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk0MCwxXQ==", "attributes": { "title": "Zeek Known Summary", @@ -587,4 +587,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/92985909-dc29-4533-9e80-d3182a0ecf1d.json b/dashboards/dashboards/92985909-dc29-4533-9e80-d3182a0ecf1d.json index 76ebf9b3b..4a835cd38 100644 --- a/dashboards/dashboards/92985909-dc29-4533-9e80-d3182a0ecf1d.json +++ b/dashboards/dashboards/92985909-dc29-4533-9e80-d3182a0ecf1d.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:46:32.487Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM1OTUsMV0=", "attributes": { "title": "Syslog", @@ -384,4 +384,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/95479950-41f2-11ea-88fa-7151df485405.json b/dashboards/dashboards/95479950-41f2-11ea-88fa-7151df485405.json index f93317e30..d455fac20 100644 --- a/dashboards/dashboards/95479950-41f2-11ea-88fa-7151df485405.json +++ b/dashboards/dashboards/95479950-41f2-11ea-88fa-7151df485405.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-09-14T19:51:11.803Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk0OSwxXQ==", "attributes": { "title": "Security Overview", @@ -566,4 +566,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/9ee51f94-3316-4fc5-bd89-93a52af69714.json b/dashboards/dashboards/9ee51f94-3316-4fc5-bd89-93a52af69714.json index 4ea6d42cf..72bf0b5a0 100644 --- a/dashboards/dashboards/9ee51f94-3316-4fc5-bd89-93a52af69714.json +++ b/dashboards/dashboards/9ee51f94-3316-4fc5-bd89-93a52af69714.json @@ -1,5 +1,5 @@ { - "version": "2.11.1", + "version": "2.12.0", "objects": [ { "id": "9ee51f94-3316-4fc5-bd89-93a52af69714", @@ -7,13 +7,13 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:31:54.606Z", - "version": "Wzk1MywxXQ==", + "updated_at": "2024-04-29T15:49:16.000Z", + "version": "WzEyODYsMV0=", "attributes": { "title": "Files", "hits": 0, 
"description": "", - "panelsJSON": "[{\"embeddableConfig\":{\"vis\":{\"legendOpen\":false}},\"gridData\":{\"h\":10,\"i\":\"2\",\"w\":32,\"x\":16,\"y\":0},\"panelIndex\":\"2\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_0\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":27,\"i\":\"3\",\"w\":8,\"x\":0,\"y\":0},\"panelIndex\":\"3\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_1\"},{\"embeddableConfig\":{\"table\":null,\"vis\":{\"params\":{\"sort\":{\"columnIndex\":0,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":0,\"direction\":\"desc\"}}},\"gridData\":{\"h\":28,\"i\":\"6\",\"w\":8,\"x\":0,\"y\":27},\"panelIndex\":\"6\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_2\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"gridData\":{\"h\":18,\"i\":\"7\",\"w\":8,\"x\":40,\"y\":10},\"panelIndex\":\"7\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_3\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"gridData\":{\"h\":18,\"i\":\"8\",\"w\":8,\"x\":32,\"y\":10},\"panelIndex\":\"8\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_4\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":6,\"i\":\"11\",\"w\":8,\"x\":8,\"y\":0},\"panelIndex\":\"11\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_5\"},{\"embeddableConfig\":{\"hidePanelTitles\":true},\"gridData\":{\"h\":4,\"i\":\"67954b42-513c-47af-af19-e2382ad27cf9\",\"w\":8,\"x\":8,\"y\":6},\"panelIndex\":\"67954b42-513c-47af-af19-e2382ad27cf9\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_6\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":47,\"i\":\"b932bc95-a3b3-411b-a7d2-2fe43e38cf8a\",\"w\":15,\"x\":8,\"y\":10},\"panelIndex\":\"b932bc95-a3b3-411b-a7d2-2fe43e38cf8a\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_7\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":1,\"direction\":\"desc\"}}},\"gridData\":{\"h\":18,\"i\":\"2d3ee44d-2d7f-4573-8b02-f6e46e550238\",\"w\":9,\"x\":23,\"y\":10},\"panelIndex\":\"2d3ee44d-2d7f-4573-8b02-f6e46e550238\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_8\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":3,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":3,\"direction\":\"desc\"}}},\"gridData\":{\"h\":29,\"i\":\"fecc7359-c195-4066-a565-2effd4380b9e\",\"w\":25,\"x\":23,\"y\":28},\"panelIndex\":\"fecc7359-c195-4066-a565-2effd4380b9e\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_9\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":35,\"i\":\"8e4863be-7d69-4354-9eb4-4e30a7c983d6\",\"w\":48,\"x\":0,\"y\":57},\"panelIndex\":\"8e4863be-7d69-4354-9eb4-4e30a7c983d6\",\"version\":\"2.11.1\",\"panelRefName\":\"panel_10\"}]", + "panelsJSON": 
"[{\"embeddableConfig\":{\"vis\":{\"legendOpen\":false}},\"gridData\":{\"h\":10,\"i\":\"2\",\"w\":32,\"x\":16,\"y\":0},\"panelIndex\":\"2\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_0\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":30,\"i\":\"3\",\"w\":8,\"x\":0,\"y\":0},\"panelIndex\":\"3\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_1\"},{\"embeddableConfig\":{\"table\":null,\"vis\":{\"params\":{\"sort\":{\"columnIndex\":0,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":0,\"direction\":\"desc\"}}},\"gridData\":{\"h\":27,\"i\":\"6\",\"w\":8,\"x\":0,\"y\":30},\"panelIndex\":\"6\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_2\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"gridData\":{\"h\":25,\"i\":\"7\",\"w\":8,\"x\":40,\"y\":10},\"panelIndex\":\"7\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_3\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"gridData\":{\"h\":25,\"i\":\"8\",\"w\":8,\"x\":32,\"y\":10},\"panelIndex\":\"8\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_4\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":10,\"i\":\"11\",\"w\":8,\"x\":8,\"y\":0},\"panelIndex\":\"11\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_5\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":47,\"i\":\"b932bc95-a3b3-411b-a7d2-2fe43e38cf8a\",\"w\":15,\"x\":8,\"y\":10},\"panelIndex\":\"b932bc95-a3b3-411b-a7d2-2fe43e38cf8a\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_6\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":1,\"direction\":\"desc\"}}},\"gridData\":{\"h\":25,\"i\":\"2d3ee44d-2d7f-4573-8b02-f6e46e550238\",\"w\":9,\"x\":23,\"y\":10},\"panelIndex\":\"2d3ee44d-2d7f-4573-8b02-f6e46e550238\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_7\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":3,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":3,\"direction\":\"desc\"}}},\"gridData\":{\"h\":44,\"i\":\"fecc7359-c195-4066-a565-2effd4380b9e\",\"w\":25,\"x\":23,\"y\":35},\"panelIndex\":\"fecc7359-c195-4066-a565-2effd4380b9e\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_8\"},{\"embeddableConfig\":{\"hidePanelTitles\":true},\"gridData\":{\"h\":22,\"i\":\"3c093f74-cb0d-4c4b-9462-0241060ba201\",\"w\":23,\"x\":0,\"y\":57},\"panelIndex\":\"3c093f74-cb0d-4c4b-9462-0241060ba201\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_9\"},{\"embeddableConfig\":{},\"gridData\":{\"h\":35,\"i\":\"8e4863be-7d69-4354-9eb4-4e30a7c983d6\",\"w\":48,\"x\":0,\"y\":79},\"panelIndex\":\"8e4863be-7d69-4354-9eb4-4e30a7c983d6\",\"version\":\"2.12.0\",\"panelRefName\":\"panel_10\"}]", "optionsJSON": "{\"useMargins\":true}", "version": 1, "timeRestore": false, @@ -55,22 +55,22 @@ { "name": "panel_6", "type": "visualization", - "id": "1642f6f0-c44c-11ee-876e-5d93490b24bb" + "id": "269ec200-7fa6-11ec-998f-a1f630163497" }, { "name": "panel_7", "type": "visualization", - "id": "269ec200-7fa6-11ec-998f-a1f630163497" + "id": "b49ab0c0-7fa9-11ec-998f-a1f630163497" }, { "name": "panel_8", "type": "visualization", - "id": "b49ab0c0-7fa9-11ec-998f-a1f630163497" + "id": "91157aa0-7fa8-11ec-998f-a1f630163497" }, { "name": "panel_9", "type": "visualization", - "id": "91157aa0-7fa8-11ec-998f-a1f630163497" + "id": "e87df6b0-f124-11ee-b6bb-474cdc003f68" }, { "name": "panel_10", @@ -88,7 +88,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", + "updated_at": "2024-04-02T17:41:12.309Z", 
"version": "WzU3NSwxXQ==", "attributes": { "visState": "{\"title\":\"Files - Log Count Over Time\",\"type\":\"line\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"firstPacket per 12 hours\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"\"}}],\"seriesParams\":[{\"show\":true,\"mode\":\"normal\",\"type\":\"histogram\",\"drawLinesBetweenPoints\":true,\"showCircles\":true,\"interpolate\":\"linear\",\"lineWidth\":2,\"data\":{\"id\":\"1\",\"label\":\"Count\"},\"valueAxis\":\"ValueAxis-1\"}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"showCircles\":true,\"interpolate\":\"linear\",\"scale\":\"linear\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"firstPacket\",\"interval\":\"auto\",\"min_doc_count\":1,\"extended_bounds\":{},\"customLabel\":\" \"}}],\"listeners\":{}}", @@ -118,8 +118,8 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:27.382Z", - "version": "Wzg2MSwxXQ==", + "updated_at": "2024-04-02T17:41:39.124Z", + "version": "Wzg2MCwxXQ==", "attributes": { "title": "Network Logs", "visState": "{\"title\":\"Network Logs\",\"type\":\"markdown\",\"params\":{\"markdown\":\"### General\\n[Overview](#/dashboard/0ad3d7c2-3441-485e-9dfe-dbb22e84e576) \\n[Security Overview](#/dashboard/95479950-41f2-11ea-88fa-7151df485405) \\n[ICS/IoT Security Overview](#/dashboard/4a4bde20-4760-11ea-949c-bbb5a9feecbf) \\n[Severity](#/dashboard/d2dd0180-06b1-11ec-8c6b-353266ade330) \\n[Connections](#/dashboard/abdd7550-2c7c-40dc-947e-f6d186a158c4) \\n[Actions and Results](#/dashboard/a33e0a50-afcd-11ea-993f-b7d8522a8bed) \\n[Files](#/dashboard/9ee51f94-3316-4fc5-bd89-93a52af69714) \\n[Executables](#/dashboard/0a490422-0ce9-44bf-9a2d-19329ddde8c3) \\n[Software](#/dashboard/87d990cc-9e0b-41e5-b8fe-b10ae1da0c85) \\n[Zeek Known Summary](#/dashboard/89d1cc50-974c-11ed-bb6b-3fb06c879b11) \\n[Zeek Intelligence](#/dashboard/36ed695f-edcc-47c1-b0ec-50d20c93ce0f) \\n[Zeek Notices](#/dashboard/f1f09567-fc7f-450b-a341-19d2f2bb468b) \\n[Zeek Weird](#/dashboard/1fff49f6-0199-4a0f-820b-721aff9ff1f1) \\n[Signatures](#/dashboard/665d1610-523d-11e9-a30e-e3576242f3ed) \\n[Suricata Alerts](#/dashboard/5694ca60-cbdf-11ec-a50a-5fedd672f5c5) \\n[Asset Interaction Analysis](#/dashboard/677ee170-809e-11ed-8d5b-07069f823b6f) \\n[↪ NetBox](/netbox/) \\n[↪ Arkime](/arkime/) \\n\\n### Common Protocols\\n[DCE/RPC](#/dashboard/432af556-c5c0-4cc3-8166-b274b4e3a406) ● [DHCP](#/dashboard/2d98bb8e-214c-4374-837b-20e1bcd63a5e) ● [DNS](#/dashboard/2cf94cd0-ecab-40a5-95a7-8419f3a39cd9) ● [FTP](#/dashboard/078b9aa5-9bd4-4f02-ae5e-cf80fa6f887b) / [TFTP](#/dashboard/bf5efbb0-60f1-11eb-9d60-dbf0411cfc48) ● [HTTP](#/dashboard/37041ee1-79c0-4684-a436-3173b0e89876) ● [IRC](#/dashboard/76f2f912-80da-44cd-ab66-6a73c8344cc3) ● 
[Kerberos](#/dashboard/82da3101-2a9c-4ae2-bb61-d447a3fbe673) ● [LDAP](#/dashboard/05e3e000-f118-11e9-acda-83a8e29e1a24) ● [MQTT](#/dashboard/87a32f90-ef58-11e9-974e-9d600036d105) ● [MySQL](#/dashboard/50ced171-1b10-4c3f-8b67-2db9635661a6) ● [NTLM](#/dashboard/543118a9-02d7-43fe-b669-b8652177fc37) ● [NTP](#/dashboard/af5df620-eeb6-11e9-bdef-65a192b7f586) ● [OSPF](#/dashboard/1cc01ff0-5205-11ec-a62c-7bc80e88f3f0) ● [QUIC](#/dashboard/11ddd980-e388-11e9-b568-cf17de8e860c) ● [RADIUS](#/dashboard/ae79b7d1-4281-4095-b2f6-fa7eafda9970) ● [RDP](#/dashboard/7f41913f-cba8-43f5-82a8-241b7ead03e0) ● [RFB](#/dashboard/f77bf097-18a8-465c-b634-eb2acc7a4f26) ● [SIP](#/dashboard/0b2354ae-0fe9-4fd9-b156-1c3870e5c7aa) ● [SMB](#/dashboard/42e831b9-41a9-4f35-8b7d-e1566d368773) ● [SMTP](#/dashboard/bb827f8e-639e-468c-93c8-9f5bc132eb8f) ● [SNMP](#/dashboard/4e5f106e-c60a-4226-8f64-d534abb912ab) ● [SSH](#/dashboard/caef3ade-d289-4d05-a511-149f3e97f238) ● [SSL](#/dashboard/7f77b58a-df3e-4cc2-b782-fd7f8bad8ffb) / [X.509 Certificates](#/dashboard/024062a6-48d6-498f-a91a-3bf2da3a3cd3) ● [STUN](#/dashboard/fa477130-2b8a-11ec-a9f2-3911c8571bfd) ● [Syslog](#/dashboard/92985909-dc29-4533-9e80-d3182a0ecf1d) ● [TDS](#/dashboard/bed185a0-ef82-11e9-b38a-2db3ee640e88) / [TDS RPC](#/dashboard/32587740-ef88-11e9-b38a-2db3ee640e88) / [TDS SQL](#/dashboard/fa141950-ef89-11e9-b38a-2db3ee640e88) ● [Telnet / rlogin / rsh](#/dashboard/c2549e10-7f2e-11ea-9f8a-1fe1327e2cd2) ● [Tunnels](#/dashboard/11be6381-beef-40a7-bdce-88c5398392fc)\\n\\n### ICS/IoT Protocols\\n[BACnet](#/dashboard/2bec1490-eb94-11e9-a384-0fcf32210194) ● [BSAP](#/dashboard/ca5799a0-56b5-11eb-b749-576de068f8ad) ● [DNP3](#/dashboard/870a5862-6c26-4a08-99fd-0c06cda85ba3) ● [EtherCAT](#/dashboard/4a073440-b286-11eb-a4d4-09fa12a6ebd4) ● [EtherNet/IP](#/dashboard/29a1b290-eb98-11e9-a384-0fcf32210194) ● [GENISYS](#/dashboard/03207c00-d07e-11ec-b4a7-d1b4003706b7) ● [Modbus](#/dashboard/152f29dc-51a2-4f53-93e9-6e92765567b8) ● [OPCUA Binary](#/dashboard/dd87edd0-796a-11ec-9ce6-b395c1ff58f4) ● [PROFINET](#/dashboard/a7514350-eba6-11e9-a384-0fcf32210194) ● [S7comm](#/dashboard/e76d05c0-eb9f-11e9-a384-0fcf32210194) ● [Synchrophasor](#/dashboard/2cc56240-e460-11ed-a9d5-9f591c284cb4) ● [Best Guess](#/dashboard/12e3a130-d83b-11eb-a0b0-f328ce09b0b7)\",\"type\":\"markdown\",\"fontSize\":10,\"openLinksInNewTab\":false},\"aggs\":[]}", @@ -141,7 +141,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", + "updated_at": "2024-04-02T17:41:12.309Z", "version": "WzU3NywxXQ==", "attributes": { "title": "Files - Files By Size (Bytes)", @@ -171,23 +171,23 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", - "version": "WzU3OCwxXQ==", + "updated_at": "2024-04-02T21:17:42.303Z", + "version": "WzEyODEsMV0=", "attributes": { - "visState": "{\"title\":\"FIles - Destination IP Address\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"destination.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"IP Address\"}}],\"listeners\":{}}", - "description": "", "title": "FIles - Destination IP 
Address", + "visState": "{\"title\":\"FIles - Destination IP Address\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"IP Address\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":15,\"showPartialRows\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"showMetricsAtAllLevels\":false,\"percentageCol\":\"\"}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", + "description": "", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"filter\":[]}" + "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" }, "savedSearchRefName": "search_0" }, "references": [ { - "type": "search", "name": "search_0", + "type": "search", "id": "0aca5333-3b1c-4cda-afb4-f7dd86910459" } ], @@ -201,23 +201,23 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", - "version": "WzU3OSwxXQ==", + "updated_at": "2024-04-02T21:18:19.669Z", + "version": "WzEyODIsMV0=", "attributes": { - "visState": "{\"title\":\"FIles - Source IP Address\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"source.ip\",\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"File IP Address\"}}],\"listeners\":{}}", - "description": "", "title": "FIles - Source IP Address", + "visState": "{\"title\":\"FIles - Source IP Address\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"source.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"File IP Address\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":15,\"showPartialRows\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"showMetricsAtAllLevels\":false,\"percentageCol\":\"\"}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", + "description": "", "version": 1, "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"filter\":[]}" + "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" }, "savedSearchRefName": "search_0" }, "references": [ { - "type": "search", "name": "search_0", + "type": "search", "id": "0aca5333-3b1c-4cda-afb4-f7dd86910459" } ], @@ -231,7 +231,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", + "updated_at": "2024-04-02T17:41:12.309Z", "version": "WzU4MCwxXQ==", "attributes": { "title": "Files - Log Count", @@ -255,37 +255,14 @@ "visualization": "7.10.0" } }, - { - "id": 
"1642f6f0-c44c-11ee-876e-5d93490b24bb", - "type": "visualization", - "namespaces": [ - "default" - ], - "updated_at": "2024-02-05T17:31:30.082Z", - "version": "Wzk1MiwxXQ==", - "attributes": { - "title": "Browse Extracted Files Link", - "visState": "{\"title\":\"Browse Extracted Files Link\",\"type\":\"markdown\",\"aggs\":[],\"params\":{\"fontSize\":12,\"openLinksInNewTab\":true,\"markdown\":\"[📁 Browse extracted files](/extracted-files/) (if [file extraction and scanning](/readme/docs/file-scanning.html#ZeekFileExtraction) is enabled)\"}}", - "uiStateJSON": "{}", - "description": "", - "version": 1, - "kibanaSavedObjectMeta": { - "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" - } - }, - "references": [], - "migrationVersion": { - "visualization": "7.10.0" - } - }, { "id": "269ec200-7fa6-11ec-998f-a1f630163497", "type": "visualization", "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", - "version": "WzU4MSwxXQ==", + "updated_at": "2024-04-02T17:41:12.309Z", + "version": "WzU4MiwxXQ==", "attributes": { "title": "Files - Source", "visState": "{\"title\":\"Files - Source\",\"type\":\"horizontal_bar\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.source\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":25,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Source\"},\"schema\":\"segment\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.source\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":25,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Source\"},\"schema\":\"group\"}],\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":200},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"square root\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":75,\"filter\":true,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":true,\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"lineWidth\":2,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false,\"labels\":{},\"thresholdLine\":{\"show\":false,\"value\":10,\"width\":1,\"style\":\"full\",\"color\":\"#E7664C\"}}}", @@ -314,11 +291,11 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", - "version": "WzU4MiwxXQ==", + "updated_at": "2024-04-02T21:18:36.100Z", + "version": "WzEyODMsMV0=", "attributes": { "title": "Files - MIME Type", - "visState": "{\"title\":\"Files - MIME 
Type\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.mime_type\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Mime Type\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\"}}", + "visState": "{\"title\":\"Files - MIME Type\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.mime_type\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Mime Type\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":15,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\"}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}}", "description": "", "version": 1, @@ -343,11 +320,11 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:00.991Z", - "version": "WzU4MywxXQ==", + "updated_at": "2024-04-02T21:19:33.639Z", + "version": "WzEyODUsMV0=", "attributes": { "title": "Files - Paths", - "visState": "{\"title\":\"Files - Paths\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.dataset\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":true,\"missingBucketLabel\":\"-\",\"customLabel\":\"Log Type\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"network.protocol\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":true,\"missingBucketLabel\":\"-\",\"customLabel\":\"Protocol\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.path\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Path\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":20,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\"}}", + "visState": "{\"title\":\"Files - Paths\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.dataset\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":true,\"missingBucketLabel\":\"-\",\"customLabel\":\"Log 
Type\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"network.protocol\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":true,\"missingBucketLabel\":\"-\",\"customLabel\":\"Protocol\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"file.path\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Path\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":30,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\"}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":3,\"direction\":\"desc\"}}}}", "description": "", "version": 1, @@ -366,14 +343,37 @@ "visualization": "7.10.0" } }, + { + "id": "e87df6b0-f124-11ee-b6bb-474cdc003f68", + "type": "visualization", + "namespaces": [ + "default" + ], + "updated_at": "2024-04-02T21:16:25.446Z", + "version": "WzEyODAsMV0=", + "attributes": { + "title": "Extracted File Downloads", + "visState": "{\"title\":\"Extracted File Downloads\",\"type\":\"transform\",\"aggs\":[],\"params\":{\"meta\":\"({})\",\"multiquerydsl\":\"{\\n \\\"topn\\\": {\\n \\\"index\\\": \\\"MALCOLM_NETWORK_INDEX_PATTERN_REPLACER\\\",\\n \\\"query\\\": {\\n \\\"bool\\\": {\\n \\\"must\\\": [\\n \\\"_DASHBOARD_CONTEXT_\\\",\\n \\\"_TIME_RANGE_[firstPacket]\\\",\\n {\\n \\\"match\\\": {\\n \\\"event.dataset\\\": \\\"files\\\"\\n }\\n },\\n {\\n \\\"match\\\": {\\n \\\"event.provider\\\": \\\"zeek\\\"\\n }\\n }\\n ]\\n }\\n },\\n \\\"aggs\\\": {\\n \\\"uris\\\": {\\n \\\"terms\\\": {\\n \\\"field\\\": \\\"zeek.files.extracted_uri\\\",\\n \\\"size\\\": 10,\\n \\\"order\\\": { \\\"_key\\\": \\\"asc\\\" }\\n }\\n }\\n }\\n }\\n}\",\"formula\":\"\\n\\n
Extracted File Downloads \\n Only the first 10 matching results are displayed, sorted alphabetically. Apply filters ⊕ to narrow scope. \\n\\n \\n \\n \\n \\n \\n \\n {{#response.topn.aggregations.uris.buckets}} \\n \\n \\n \\n {{/response.topn.aggregations.uris.buckets}} \\n \\n Download Link (if preserved) 💾 {{key}} \\n You can also 📁Browse extracted files. See Automatic file extraction and scanning for more information.
\"}}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" + } + }, + "references": [], + "migrationVersion": { + "visualization": "7.10.0" + } + }, { "id": "0aca5333-3b1c-4cda-afb4-f7dd86910459", "type": "search", "namespaces": [ "default" ], - "updated_at": "2024-02-05T17:21:16.253Z", - "version": "Wzc2NCwxXQ==", + "updated_at": "2024-04-02T17:41:27.817Z", + "version": "Wzc2MywxXQ==", "attributes": { "title": "Files - Logs", "description": "", @@ -409,4 +409,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/a16110b0-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/a16110b0-3f99-11e9-a58e-8bdedb0915e8.json index 76a377638..978533c62 100644 --- a/dashboards/dashboards/a16110b0-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/a16110b0-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:47.256Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzY0NywxXQ==", "attributes": { "title": "Connections - Destination - Sum of Total Bytes (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/a33e0a50-afcd-11ea-993f-b7d8522a8bed.json b/dashboards/dashboards/a33e0a50-afcd-11ea-993f-b7d8522a8bed.json index a72f9975b..eb8d566c3 100644 --- a/dashboards/dashboards/a33e0a50-afcd-11ea-993f-b7d8522a8bed.json +++ b/dashboards/dashboards/a33e0a50-afcd-11ea-993f-b7d8522a8bed.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-11-14T20:25:52.249Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk2MSwxXQ==", "attributes": { "title": "Actions and Results", @@ -333,4 +333,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/a7514350-eba6-11e9-a384-0fcf32210194.json b/dashboards/dashboards/a7514350-eba6-11e9-a384-0fcf32210194.json index 96953438f..ae4c59a3c 100644 --- a/dashboards/dashboards/a7514350-eba6-11e9-a384-0fcf32210194.json +++ b/dashboards/dashboards/a7514350-eba6-11e9-a384-0fcf32210194.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-02-27T18:15:37.621Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk5MywxXQ==", "attributes": { "title": "PROFINET", @@ -452,4 +452,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/abdd7550-2c7c-40dc-947e-f6d186a158c4.json b/dashboards/dashboards/abdd7550-2c7c-40dc-947e-f6d186a158c4.json index 898c5e6ac..721d05ca0 100644 --- a/dashboards/dashboards/abdd7550-2c7c-40dc-947e-f6d186a158c4.json +++ b/dashboards/dashboards/abdd7550-2c7c-40dc-947e-f6d186a158c4.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-04-29T20:10:44.437Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEzMjMsMV0=", "attributes": { "title": "Connections", @@ -938,4 +938,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/ae79b7d1-4281-4095-b2f6-fa7eafda9970.json b/dashboards/dashboards/ae79b7d1-4281-4095-b2f6-fa7eafda9970.json index 2c618e78f..0ad2e7dcd 100644 --- a/dashboards/dashboards/ae79b7d1-4281-4095-b2f6-fa7eafda9970.json +++ b/dashboards/dashboards/ae79b7d1-4281-4095-b2f6-fa7eafda9970.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-11-12T20:01:32.314Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwMzgsMV0=", "attributes": { "title": "RADIUS", @@ -385,4 +385,4 @@ } } ] -} \ No newline at 
end of file +} diff --git a/dashboards/dashboards/af5df620-eeb6-11e9-bdef-65a192b7f586.json b/dashboards/dashboards/af5df620-eeb6-11e9-bdef-65a192b7f586.json index b50436548..4be6d0d60 100644 --- a/dashboards/dashboards/af5df620-eeb6-11e9-bdef-65a192b7f586.json +++ b/dashboards/dashboards/af5df620-eeb6-11e9-bdef-65a192b7f586.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T16:00:05.351Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzIzMzIsMV0=", "attributes": { "title": "NTP", @@ -385,4 +385,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/b50c8d17-6ed3-4de6-aed4-5181032810b2.json b/dashboards/dashboards/b50c8d17-6ed3-4de6-aed4-5181032810b2.json index 95f1d4f00..2fcdd0ac2 100644 --- a/dashboards/dashboards/b50c8d17-6ed3-4de6-aed4-5181032810b2.json +++ b/dashboards/dashboards/b50c8d17-6ed3-4de6-aed4-5181032810b2.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:53.414Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzcyNCwxXQ==", "attributes": { "title": "Connections - Source - Originator Bytes", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/b9f247c0-3f99-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/b9f247c0-3f99-11e9-a58e-8bdedb0915e8.json index 2bb713c01..8fdfd7e83 100644 --- a/dashboards/dashboards/b9f247c0-3f99-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/b9f247c0-3f99-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:54.429Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzczMCwxXQ==", "attributes": { "title": "Connections - Destination - Top Connection Duration (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/bb827f8e-639e-468c-93c8-9f5bc132eb8f.json b/dashboards/dashboards/bb827f8e-639e-468c-93c8-9f5bc132eb8f.json index e290c789b..53c5090eb 100644 --- a/dashboards/dashboards/bb827f8e-639e-468c-93c8-9f5bc132eb8f.json +++ b/dashboards/dashboards/bb827f8e-639e-468c-93c8-9f5bc132eb8f.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:17:41.430Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzMyNzUsMV0=", "attributes": { "title": "SMTP", @@ -524,4 +524,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/0d4955f0-eb25-11ec-a6d4-b3526526c2c7.json b/dashboards/dashboards/beats/0d4955f0-eb25-11ec-a6d4-b3526526c2c7.json index 983bfdaa0..e365adb8c 100644 --- a/dashboards/dashboards/beats/0d4955f0-eb25-11ec-a6d4-b3526526c2c7.json +++ b/dashboards/dashboards/beats/0d4955f0-eb25-11ec-a6d4-b3526526c2c7.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-13T14:30:49.985Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzkxMSwyXQ==", "attributes": { "title": "Hardware Temperature", diff --git a/dashboards/dashboards/beats/3768ef70-d819-11ee-820d-dd9fd73a3921.json b/dashboards/dashboards/beats/3768ef70-d819-11ee-820d-dd9fd73a3921.json index d7de484d4..5de33dce4 100644 --- a/dashboards/dashboards/beats/3768ef70-d819-11ee-820d-dd9fd73a3921.json +++ b/dashboards/dashboards/beats/3768ef70-d819-11ee-820d-dd9fd73a3921.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-03-01T22:15:31.047Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzExMTEsMV0=", "attributes": { "title": "Linux Kernel Messages", @@ -138,4 +138,4 @@ } } ] -} \ No newline at end of file +} diff 
--git a/dashboards/dashboards/beats/4ca94c70-d7da-11ee-9ed3-e7afff29e59a.json b/dashboards/dashboards/beats/4ca94c70-d7da-11ee-9ed3-e7afff29e59a.json index d2bc33ddb..2e63054dc 100644 --- a/dashboards/dashboards/beats/4ca94c70-d7da-11ee-9ed3-e7afff29e59a.json +++ b/dashboards/dashboards/beats/4ca94c70-d7da-11ee-9ed3-e7afff29e59a.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-03-13T15:10:41.120Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwNjUsMV0=", "attributes": { "title": "Packet Capture Statistics", @@ -547,4 +547,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/79202ee0-d811-11ee-820d-dd9fd73a3921.json b/dashboards/dashboards/beats/79202ee0-d811-11ee-820d-dd9fd73a3921.json index db57994e6..ab860c502 100644 --- a/dashboards/dashboards/beats/79202ee0-d811-11ee-820d-dd9fd73a3921.json +++ b/dashboards/dashboards/beats/79202ee0-d811-11ee-820d-dd9fd73a3921.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2024-03-01T22:03:46.831Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwOTgsMV0=", "attributes": { "title": "Windows Events", @@ -346,4 +346,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/7a7e0a60-e8e8-11ec-b9d4-4569bb965430.json b/dashboards/dashboards/beats/7a7e0a60-e8e8-11ec-b9d4-4569bb965430.json index 127289332..bae5dba1c 100644 --- a/dashboards/dashboards/beats/7a7e0a60-e8e8-11ec-b9d4-4569bb965430.json +++ b/dashboards/dashboards/beats/7a7e0a60-e8e8-11ec-b9d4-4569bb965430.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-10T18:15:34.515Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzkyNSwxXQ==", "attributes": { "title": "Malcolm Sensor Audit Logs", @@ -275,4 +275,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/903f42c0-f634-11ec-828d-2fb7a4a26e1f.json b/dashboards/dashboards/beats/903f42c0-f634-11ec-828d-2fb7a4a26e1f.json index 98f29a82d..1e1551281 100644 --- a/dashboards/dashboards/beats/903f42c0-f634-11ec-828d-2fb7a4a26e1f.json +++ b/dashboards/dashboards/beats/903f42c0-f634-11ec-828d-2fb7a4a26e1f.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-27T19:43:07.018Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk0NywxXQ==", "attributes": { "title": "Malcolm Sensor File/Directory Integrity", @@ -210,4 +210,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/Filebeat-nginx-logs.json b/dashboards/dashboards/beats/Filebeat-nginx-logs.json index 879a0ae00..b2ad0dc32 100644 --- a/dashboards/dashboards/beats/Filebeat-nginx-logs.json +++ b/dashboards/dashboards/beats/Filebeat-nginx-logs.json @@ -7,13 +7,12 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-01T19:53:27.884Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwMTgsMV0=", "attributes": { "title": "nginx Access and Error Logs", - "description": "nginx Access and Error logs, including from Malcolm's own nginx instance", - "hits": 0, "description": "", + "hits": 0, "panelsJSON": 
"[{\"version\":\"2.0.0\",\"gridData\":{\"x\":0,\"y\":16,\"w\":48,\"h\":15,\"i\":\"11\"},\"panelIndex\":\"11\",\"embeddableConfig\":{\"columns\":[\"log.level\",\"error.message\"],\"sort\":[\"@timestamp\",\"desc\"]},\"panelRefName\":\"panel_0\"},{\"version\":\"2.0.0\",\"gridData\":{\"x\":0,\"y\":31,\"w\":48,\"h\":23,\"i\":\"16\"},\"panelIndex\":\"16\",\"embeddableConfig\":{\"columns\":[\"url.original\",\"http.request.method\",\"http.response.status_code\",\"http.response.body.bytes\"],\"sort\":[\"@timestamp\",\"desc\"]},\"panelRefName\":\"panel_1\"},{\"version\":\"2.0.0\",\"gridData\":{\"x\":0,\"y\":4,\"w\":48,\"h\":12,\"i\":\"18\"},\"panelIndex\":\"18\",\"embeddableConfig\":{},\"panelRefName\":\"panel_2\"},{\"version\":\"2.0.0\",\"gridData\":{\"x\":0,\"y\":0,\"w\":48,\"h\":4,\"i\":\"19\"},\"panelIndex\":\"19\",\"embeddableConfig\":{},\"panelRefName\":\"panel_3\"}]", "optionsJSON": "{\"darkTheme\":false}", "version": 1, @@ -173,4 +172,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/Filebeat-nginx-overview.json b/dashboards/dashboards/beats/Filebeat-nginx-overview.json index 73e3fe9c5..7690a08a8 100644 --- a/dashboards/dashboards/beats/Filebeat-nginx-overview.json +++ b/dashboards/dashboards/beats/Filebeat-nginx-overview.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-01T19:41:23.453Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzg0NywxXQ==", "attributes": { "description": "nginx logs, including from Malcolm's own nginx instance", @@ -312,4 +312,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/Metricbeat-host-overview.json b/dashboards/dashboards/beats/Metricbeat-host-overview.json index 74c3ad39a..e700b9905 100644 --- a/dashboards/dashboards/beats/Metricbeat-host-overview.json +++ b/dashboards/dashboards/beats/Metricbeat-host-overview.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-30T17:54:04.824Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwMzgsMV0=", "attributes": { "title": "Resources - Hosts Overview", @@ -504,4 +504,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/Metricbeat-system-overview.json b/dashboards/dashboards/beats/Metricbeat-system-overview.json index eea8dd158..8d88d63d0 100644 --- a/dashboards/dashboards/beats/Metricbeat-system-overview.json +++ b/dashboards/dashboards/beats/Metricbeat-system-overview.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-06-30T17:45:03.314Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk2NCwxXQ==", "attributes": { "title": "Resources - System Overview", @@ -320,4 +320,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/beats/f6600310-9943-11ee-a029-e973f4774355.json b/dashboards/dashboards/beats/f6600310-9943-11ee-a029-e973f4774355.json index 0ce72ec85..9947bb6c2 100644 --- a/dashboards/dashboards/beats/f6600310-9943-11ee-a029-e973f4774355.json +++ b/dashboards/dashboards/beats/f6600310-9943-11ee-a029-e973f4774355.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-12-14T22:33:38.334Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzkzOCwxXQ==", "attributes": { "title": "Journald Logs", @@ -248,4 +248,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/bed185a0-ef82-11e9-b38a-2db3ee640e88.json b/dashboards/dashboards/bed185a0-ef82-11e9-b38a-2db3ee640e88.json index 8f866e4ef..547645d61 100644 --- 
a/dashboards/dashboards/bed185a0-ef82-11e9-b38a-2db3ee640e88.json +++ b/dashboards/dashboards/bed185a0-ef82-11e9-b38a-2db3ee640e88.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:47:53.333Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM2MjYsMV0=", "attributes": { "title": "Tabular Data Stream", @@ -319,4 +319,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/bf5efbb0-60f1-11eb-9d60-dbf0411cfc48.json b/dashboards/dashboards/bf5efbb0-60f1-11eb-9d60-dbf0411cfc48.json index 5d42165b4..ba71a5441 100644 --- a/dashboards/dashboards/bf5efbb0-60f1-11eb-9d60-dbf0411cfc48.json +++ b/dashboards/dashboards/bf5efbb0-60f1-11eb-9d60-dbf0411cfc48.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-15T14:24:54.745Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzU3NiwxXQ==", "attributes": { "title": "TFTP", @@ -351,4 +351,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/c2549e10-7f2e-11ea-9f8a-1fe1327e2cd2.json b/dashboards/dashboards/c2549e10-7f2e-11ea-9f8a-1fe1327e2cd2.json index 58f3c21cf..676478426 100644 --- a/dashboards/dashboards/c2549e10-7f2e-11ea-9f8a-1fe1327e2cd2.json +++ b/dashboards/dashboards/c2549e10-7f2e-11ea-9f8a-1fe1327e2cd2.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T19:01:48.690Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM3MzksMV0=", "attributes": { "title": "Telnet, rlogin and rsh", @@ -315,4 +315,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/ca5799a0-56b5-11eb-b749-576de068f8ad.json b/dashboards/dashboards/ca5799a0-56b5-11eb-b749-576de068f8ad.json index 7797d09f2..62abc2fb5 100644 --- a/dashboards/dashboards/ca5799a0-56b5-11eb-b749-576de068f8ad.json +++ b/dashboards/dashboards/ca5799a0-56b5-11eb-b749-576de068f8ad.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:24:59.492Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzc3NCwxXQ==", "attributes": { "title": "BSAP", @@ -483,4 +483,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/caef3ade-d289-4d05-a511-149f3e97f238.json b/dashboards/dashboards/caef3ade-d289-4d05-a511-149f3e97f238.json index 0788ca9de..60cc062ec 100644 --- a/dashboards/dashboards/caef3ade-d289-4d05-a511-149f3e97f238.json +++ b/dashboards/dashboards/caef3ade-d289-4d05-a511-149f3e97f238.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:33:44.355Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM0MzgsMV0=", "attributes": { "title": "SSH", @@ -490,4 +490,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/d2dd0180-06b1-11ec-8c6b-353266ade330.json b/dashboards/dashboards/d2dd0180-06b1-11ec-8c6b-353266ade330.json index 7804e79d6..2960f9192 100644 --- a/dashboards/dashboards/d2dd0180-06b1-11ec-8c6b-353266ade330.json +++ b/dashboards/dashboards/d2dd0180-06b1-11ec-8c6b-353266ade330.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-09-02T18:26:13.166Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzMwMTksMV0=", "attributes": { "title": "Severity", @@ -685,4 +685,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/d41fe630-3f98-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/d41fe630-3f98-11e9-a58e-8bdedb0915e8.json index 0c7cbab75..3d8a21438 100644 --- a/dashboards/dashboards/d41fe630-3f98-11e9-a58e-8bdedb0915e8.json +++ 
b/dashboards/dashboards/d41fe630-3f98-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:25:01.513Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzgwMiwxXQ==", "attributes": { "title": "Connections - Source - Originator Bytes (region map)", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/d4fd6afd-15cb-42bf-8a25-03dd8e59b327.json b/dashboards/dashboards/d4fd6afd-15cb-42bf-8a25-03dd8e59b327.json index b70adb2a3..7787b478e 100644 --- a/dashboards/dashboards/d4fd6afd-15cb-42bf-8a25-03dd8e59b327.json +++ b/dashboards/dashboards/d4fd6afd-15cb-42bf-8a25-03dd8e59b327.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:25:02.530Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzgwOCwxXQ==", "attributes": { "title": "Connections - Destination - Responder Bytes", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/dd87edd0-796a-11ec-9ce6-b395c1ff58f4.json b/dashboards/dashboards/dd87edd0-796a-11ec-9ce6-b395c1ff58f4.json index df44ee0a6..6bd425e30 100644 --- a/dashboards/dashboards/dd87edd0-796a-11ec-9ce6-b395c1ff58f4.json +++ b/dashboards/dashboards/dd87edd0-796a-11ec-9ce6-b395c1ff58f4.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-01-26T15:54:12.963Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzk1MiwxXQ==", "attributes": { "title": "OPCUA Binary", @@ -525,4 +525,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/e09a4b86-29b5-4256-bb3b-802ac9f90404.json b/dashboards/dashboards/e09a4b86-29b5-4256-bb3b-802ac9f90404.json index 56993492a..034ea94da 100644 --- a/dashboards/dashboards/e09a4b86-29b5-4256-bb3b-802ac9f90404.json +++ b/dashboards/dashboards/e09a4b86-29b5-4256-bb3b-802ac9f90404.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:25:03.541Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzgxNCwxXQ==", "attributes": { "title": "Connections - Source - Top Connection Duration", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/e76d05c0-eb9f-11e9-a384-0fcf32210194.json b/dashboards/dashboards/e76d05c0-eb9f-11e9-a384-0fcf32210194.json index 56bd82823..6126054a3 100644 --- a/dashboards/dashboards/e76d05c0-eb9f-11e9-a384-0fcf32210194.json +++ b/dashboards/dashboards/e76d05c0-eb9f-11e9-a384-0fcf32210194.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-10-10T19:24:43.925Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzkwNiwxXQ==", "attributes": { "title": "S7comm / S7comm Plus", @@ -503,4 +503,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/ed8a6640-3f98-11e9-a58e-8bdedb0915e8.json b/dashboards/dashboards/ed8a6640-3f98-11e9-a58e-8bdedb0915e8.json index 0fc1cd719..707531d3f 100644 --- a/dashboards/dashboards/ed8a6640-3f98-11e9-a58e-8bdedb0915e8.json +++ b/dashboards/dashboards/ed8a6640-3f98-11e9-a58e-8bdedb0915e8.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:25:05.562Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzgzMywxXQ==", "attributes": { "title": "Connections - Source - Responder Bytes (region map)", @@ -135,4 +135,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/f1f09567-fc7f-450b-a341-19d2f2bb468b.json b/dashboards/dashboards/f1f09567-fc7f-450b-a341-19d2f2bb468b.json index 776cdd97a..85b2c2530 100644 --- 
a/dashboards/dashboards/f1f09567-fc7f-450b-a341-19d2f2bb468b.json +++ b/dashboards/dashboards/f1f09567-fc7f-450b-a341-19d2f2bb468b.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2022-05-04T17:53:11.078Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzczOSwxXQ==", "attributes": { "title": "Zeek Notices", @@ -523,4 +523,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/f394057d-1b16-4174-b994-7045f423a416.json b/dashboards/dashboards/f394057d-1b16-4174-b994-7045f423a416.json index 345c38ac8..39e46a257 100644 --- a/dashboards/dashboards/f394057d-1b16-4174-b994-7045f423a416.json +++ b/dashboards/dashboards/f394057d-1b16-4174-b994-7045f423a416.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-10T21:25:07.590Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "Wzg1MSwxXQ==", "attributes": { "title": "Connections - Source - Sum of Total Bytes", @@ -207,4 +207,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/f77bf097-18a8-465c-b634-eb2acc7a4f26.json b/dashboards/dashboards/f77bf097-18a8-465c-b634-eb2acc7a4f26.json index 5e089e539..6f9aef4b0 100644 --- a/dashboards/dashboards/f77bf097-18a8-465c-b634-eb2acc7a4f26.json +++ b/dashboards/dashboards/f77bf097-18a8-465c-b634-eb2acc7a4f26.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T17:56:05.373Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzMwNTMsMV0=", "attributes": { "title": "RFB", @@ -490,4 +490,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/fa141950-ef89-11e9-b38a-2db3ee640e88.json b/dashboards/dashboards/fa141950-ef89-11e9-b38a-2db3ee640e88.json index 69d4a8c49..84ed8892a 100644 --- a/dashboards/dashboards/fa141950-ef89-11e9-b38a-2db3ee640e88.json +++ b/dashboards/dashboards/fa141950-ef89-11e9-b38a-2db3ee640e88.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-02-11T18:59:12.130Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzM3MjIsMV0=", "attributes": { "title": "Tabular Data Stream - SQL", @@ -313,4 +313,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/dashboards/fa477130-2b8a-11ec-a9f2-3911c8571bfd.json b/dashboards/dashboards/fa477130-2b8a-11ec-a9f2-3911c8571bfd.json index 790cbd570..72bf72a0e 100644 --- a/dashboards/dashboards/fa477130-2b8a-11ec-a9f2-3911c8571bfd.json +++ b/dashboards/dashboards/fa477130-2b8a-11ec-a9f2-3911c8571bfd.json @@ -7,7 +7,7 @@ "namespaces": [ "default" ], - "updated_at": "2021-10-14T16:32:23.695Z", + "updated_at": "2024-04-29T15:49:16.000Z", "version": "WzEwOTIsMV0=", "attributes": { "title": "STUN", @@ -541,4 +541,4 @@ } } ] -} \ No newline at end of file +} diff --git a/dashboards/scripts/create-arkime-sessions-index.sh b/dashboards/scripts/create-arkime-sessions-index.sh deleted file mode 100755 index 02c3dea46..000000000 --- a/dashboards/scripts/create-arkime-sessions-index.sh +++ /dev/null @@ -1,340 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. 
- -set -euo pipefail -shopt -s nocasematch - -DASHB_URL=${DASHBOARDS_URL:-"http://dashboards:5601/dashboards"} -INDEX_PATTERN=${MALCOLM_NETWORK_INDEX_PATTERN:-"arkime_sessions3-*"} -INDEX_TIME_FIELD=${MALCOLM_NETWORK_INDEX_TIME_FIELD:-"firstPacket"} -OTHER_INDEX_PATTERN=${MALCOLM_OTHER_INDEX_PATTERN:-"malcolm_beats_*"} -OTHER_INDEX_TIME_FIELD=${MALCOLM_OTHER_INDEX_TIME_FIELD:-"@timestamp"} -DUMMY_DETECTOR_NAME=${DUMMY_DETECTOR_NAME:-"malcolm_init_dummy"} -DARK_MODE=${DASHBOARDS_DARKMODE:-"true"} - -MALCOLM_TEMPLATES_DIR="/opt/templates" -MALCOLM_TEMPLATE_FILE_ORIG="$MALCOLM_TEMPLATES_DIR/malcolm_template.json" -MALCOLM_TEMPLATE_FILE="/data/init/malcolm_template.json" -DEFAULT_DASHBOARD=${OPENSEARCH_DEFAULT_DASHBOARD:-"0ad3d7c2-3441-485e-9dfe-dbb22e84e576"} - -ISM_SNAPSHOT_REPO=${ISM_SNAPSHOT_REPO:-"logs"} -ISM_SNAPSHOT_COMPRESSED=${ISM_SNAPSHOT_COMPRESSED:-"false"} - -OPENSEARCH_PRIMARY=${OPENSEARCH_PRIMARY:-"opensearch-local"} -OPENSEARCH_SECONDARY=${OPENSEARCH_SECONDARY:-""} - -function DoReplacersInFile() { - # Index pattern and time field name may be specified via environment variable, but need - # to be reflected in dashboards, templates, anomaly detectors, etc. - # This function takes a file and performs that replacement. - REPLFILE="$1" - if [[ -n "$REPLFILE" ]] && [[ -f "$REPLFILE" ]]; then - sed -i "s/MALCOLM_NETWORK_INDEX_PATTERN_REPLACER/${INDEX_PATTERN}/g" "${REPLFILE}" || true - sed -i "s/MALCOLM_NETWORK_INDEX_TIME_FIELD_REPLACER/${INDEX_TIME_FIELD}/g" "${REPLFILE}" || true - sed -i "s/MALCOLM_OTHER_INDEX_PATTERN_REPLACER/${OTHER_INDEX_PATTERN}/g" "${REPLFILE}" || true - sed -i "s/MALCOLM_OTHER_INDEX_TIME_FIELD_REPLACER/${OTHER_INDEX_TIME_FIELD}/g" "${REPLFILE}" || true - fi -} - -function DoReplacersForDir() { - REPLDIR="$1" - if [[ -n "$REPLDIR" ]] && [[ -d "$REPLDIR" ]]; then - while IFS= read -r fname; do - DoReplacersInFile "$fname" - done < <( find "$REPLDIR"/ -type f 2>/dev/null ) - fi -} - -# is the argument to automatically create this index enabled? 
-if [[ "$CREATE_OS_ARKIME_SESSION_INDEX" = "true" ]] ; then - - # give OpenSearch time to start and Arkime to get its own template created before configuring dashboards - /data/opensearch_status.sh -l arkime_sessions3_template >/dev/null 2>&1 - - for LOOP in primary secondary; do - - if [[ "$LOOP" == "primary" ]]; then - OPENSEARCH_URL_TO_USE=${OPENSEARCH_URL:-"http://opensearch:9200"} - OPENSEARCH_CREDS_CONFIG_FILE_TO_USE=${OPENSEARCH_CREDS_CONFIG_FILE:-"/var/local/curlrc/.opensearch.primary.curlrc"} - if ( [[ "$OPENSEARCH_PRIMARY" == "opensearch-remote" ]] || [[ "$OPENSEARCH_PRIMARY" == "elasticsearch-remote" ]] ) && [[ -r "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" ]]; then - OPENSEARCH_LOCAL=false - CURL_CONFIG_PARAMS=( - --config - "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" - ) - else - OPENSEARCH_LOCAL=true - CURL_CONFIG_PARAMS=() - - fi - DATASTORE_TYPE="$(echo "$OPENSEARCH_PRIMARY" | cut -d- -f1)" - - elif [[ "$LOOP" == "secondary" ]] && ( [[ "$OPENSEARCH_SECONDARY" == "opensearch-remote" ]] || [[ "$OPENSEARCH_SECONDARY" == "elasticsearch-remote" ]] ) && [[ -n "${OPENSEARCH_SECONDARY_URL:-""}" ]]; then - OPENSEARCH_URL_TO_USE=$OPENSEARCH_SECONDARY_URL - OPENSEARCH_LOCAL=false - OPENSEARCH_CREDS_CONFIG_FILE_TO_USE=${OPENSEARCH_SECONDARY_CREDS_CONFIG_FILE:-"/var/local/curlrc/.opensearch.secondary.curlrc"} - if [[ -r "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" ]]; then - CURL_CONFIG_PARAMS=( - --config - "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" - ) - else - CURL_CONFIG_PARAMS=() - fi - DATASTORE_TYPE="$(echo "$OPENSEARCH_SECONDARY" | cut -d- -f1)" - - else - continue - fi - [[ -z "$DATASTORE_TYPE" ]] && DATASTORE_TYPE="opensearch" - if [[ "$DATASTORE_TYPE" == "elasticsearch" ]]; then - DASHBOARDS_URI_PATH="kibana" - XSRF_HEADER="kbn-xsrf" - ECS_TEMPLATES_DIR=/opt/ecs-templates - else - DASHBOARDS_URI_PATH="opensearch-dashboards" - XSRF_HEADER="osd-xsrf" - ECS_TEMPLATES_DIR=/opt/ecs-templates-os - fi - - # is the Dashboards process server up and responding to requests? - if [[ "$LOOP" != "primary" ]] || curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --fail -XGET "$DASHB_URL/api/status" ; then - - # have we not not already created the index pattern? - if [[ "$LOOP" != "primary" ]] || ! curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --fail -XGET "$DASHB_URL/api/saved_objects/index-pattern/$INDEX_PATTERN" ; then - - echo "$DATASTORE_TYPE ($LOOP) is running at \"${OPENSEARCH_URL_TO_USE}\"!" - - # register the repo name/path for opensearch snapshots (but don't count this an unrecoverable failure) - if [[ "$LOOP" == "primary" ]] && [[ "$OPENSEARCH_LOCAL" == "true" ]]; then - echo "Registering index snapshot repository..." 
- curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -H "Accept: application/json" \ - -H "Content-type: application/json" \ - -XPUT -fsSL "$OPENSEARCH_URL_TO_USE/_snapshot/$ISM_SNAPSHOT_REPO" \ - -d "{ \"type\": \"fs\", \"settings\": { \"location\": \"$ISM_SNAPSHOT_REPO\", \"compress\": $ISM_SNAPSHOT_COMPRESSED } }" \ - || true - fi - - TEMPLATES_IMPORT_DIR="$(mktemp -d -t templates-XXXXXX)" - rsync -a "$MALCOLM_TEMPLATES_DIR"/ "$TEMPLATES_IMPORT_DIR"/ - DoReplacersForDir "$TEMPLATES_IMPORT_DIR" - MALCOLM_TEMPLATE_FILE_ORIG_TMP="$(echo "$MALCOLM_TEMPLATE_FILE_ORIG" | sed "s@$MALCOLM_TEMPLATES_DIR@$TEMPLATES_IMPORT_DIR@")" - - # calculate combined SHA sum of all templates to save as _meta.hash to determine if - # we need to do this import (mostly useful for the secondary loop) - TEMPLATE_HASH="$(find "$ECS_TEMPLATES_DIR"/composable "$TEMPLATES_IMPORT_DIR" -type f -name "*.json" -size +2c 2>/dev/null | sort | xargs -r cat | sha256sum | awk '{print $1}')" - - # get the previous stored template hash (if any) to avoid importing if it's already been imported - set +e - TEMPLATE_HASH_OLD="$(curl "${CURL_CONFIG_PARAMS[@]}" -sSL --fail -XGET -H "Content-Type: application/json" "$OPENSEARCH_URL_TO_USE/_index_template/malcolm_template" 2>/dev/null | jq --raw-output '.index_templates[]|select(.name=="malcolm_template")|.index_template._meta.hash' 2>/dev/null)" - set -e - - # information about other index patterns will be obtained during template import - OTHER_INDEX_PATTERNS=() - - # proceed only if the current template HASH doesn't match the previously imported one, or if there - # was an error calculating or storing either - if [[ "$TEMPLATE_HASH" != "$TEMPLATE_HASH_OLD" ]] || [[ -z "$TEMPLATE_HASH_OLD" ]] || [[ -z "$TEMPLATE_HASH" ]]; then - - if [[ -d "$ECS_TEMPLATES_DIR"/composable/component ]]; then - echo "Importing ECS composable templates..." - for i in "$ECS_TEMPLATES_DIR"/composable/component/*.json; do - TEMP_BASENAME="$(basename "$i")" - TEMP_FILENAME="${TEMP_BASENAME%.*}" - echo "Importing ECS composable template $TEMP_FILENAME ..." - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" "$OPENSEARCH_URL_TO_USE/_component_template/ecs_$TEMP_FILENAME" -d "@$i" 2>&1 || true - done - fi - - if [[ -d "$TEMPLATES_IMPORT_DIR"/composable/component ]]; then - echo "Importing custom ECS composable templates..." - for i in "$TEMPLATES_IMPORT_DIR"/composable/component/*.json; do - TEMP_BASENAME="$(basename "$i")" - TEMP_FILENAME="${TEMP_BASENAME%.*}" - echo "Importing custom ECS composable template $TEMP_FILENAME ..." - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" "$OPENSEARCH_URL_TO_USE/_component_template/custom_$TEMP_FILENAME" -d "@$i" 2>&1 || true - done - fi - - echo "Importing malcolm_template ($TEMPLATE_HASH)..." - - if [[ -f "$MALCOLM_TEMPLATE_FILE_ORIG_TMP" ]] && [[ ! 
-f "$MALCOLM_TEMPLATE_FILE" ]]; then - cp "$MALCOLM_TEMPLATE_FILE_ORIG_TMP" "$MALCOLM_TEMPLATE_FILE" - fi - - # store the TEMPLATE_HASH we calculated earlier as the _meta.hash for the malcolm template - MALCOLM_TEMPLATE_FILE_TEMP="$(mktemp)" - ( jq "._meta.hash=\"$TEMPLATE_HASH\"" "$MALCOLM_TEMPLATE_FILE" >"$MALCOLM_TEMPLATE_FILE_TEMP" 2>/dev/null ) && \ - [[ -s "$MALCOLM_TEMPLATE_FILE_TEMP" ]] && \ - cp -f "$MALCOLM_TEMPLATE_FILE_TEMP" "$MALCOLM_TEMPLATE_FILE" && \ - rm -f "$MALCOLM_TEMPLATE_FILE_TEMP" - - # load malcolm_template containing malcolm data source field type mappings (merged from /opt/templates/malcolm_template.json to /data/init/malcolm_template.json in dashboard-helpers on startup) - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" \ - "$OPENSEARCH_URL_TO_USE/_index_template/malcolm_template" -d "@$MALCOLM_TEMPLATE_FILE" 2>&1 - - # import other templates as well (and get info for creating their index patterns) - for i in "$TEMPLATES_IMPORT_DIR"/*.json; do - TEMP_BASENAME="$(basename "$i")" - TEMP_FILENAME="${TEMP_BASENAME%.*}" - if [[ "$TEMP_FILENAME" != "malcolm_template" ]]; then - echo "Importing template \"$TEMP_FILENAME\"..." - if curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" "$OPENSEARCH_URL_TO_USE/_index_template/$TEMP_FILENAME" -d "@$i" 2>&1; then - for TEMPLATE_INDEX_PATTERN in $(jq '.index_patterns[]' "$i" | tr -d '"'); do - OTHER_INDEX_PATTERNS+=("$TEMPLATE_INDEX_PATTERN;$TEMPLATE_INDEX_PATTERN;@timestamp") - done - fi - fi - done - - else - echo "malcolm_template ($TEMPLATE_HASH) already exists ($LOOP) at \"${OPENSEARCH_URL_TO_USE}\"" - - fi # TEMPLATE_HASH check - rm -rf "${TEMPLATES_IMPORT_DIR}" - - if [[ "$LOOP" == "primary" ]]; then - echo "Importing index pattern..." - - # From https://github.com/elastic/kibana/issues/3709 - # Create index pattern - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ - "$DASHB_URL/api/saved_objects/index-pattern/$INDEX_PATTERN" \ - -d"{\"attributes\":{\"title\":\"$INDEX_PATTERN\",\"timeFieldName\":\"$INDEX_TIME_FIELD\"}}" 2>&1 || true - - echo "Setting default index pattern..." - - # Make it the default index - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ - "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/defaultIndex" \ - -d"{\"value\":\"$INDEX_PATTERN\"}" || true - - for i in ${OTHER_INDEX_PATTERNS[@]}; do - IDX_ID="$(echo "$i" | cut -d';' -f1)" - IDX_NAME="$(echo "$i" | cut -d';' -f2)" - IDX_TIME_FIELD="$(echo "$i" | cut -d';' -f3)" - echo "Creating index pattern \"$IDX_NAME\"..." - curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ - "$DASHB_URL/api/saved_objects/index-pattern/$IDX_ID" \ - -d"{\"attributes\":{\"title\":\"$IDX_NAME\",\"timeFieldName\":\"$IDX_TIME_FIELD\"}}" 2>&1 || true - done - - echo "Importing $DATASTORE_TYPE Dashboards saved objects..." 
- - # install default dashboards - DASHBOARDS_IMPORT_DIR="$(mktemp -d -t dashboards-XXXXXX)" - rsync -a /opt/dashboards/ "$DASHBOARDS_IMPORT_DIR"/ - DoReplacersForDir "$DASHBOARDS_IMPORT_DIR"/ - for i in "${DASHBOARDS_IMPORT_DIR}"/*.json; do - if [[ "$DATASTORE_TYPE" == "elasticsearch" ]]; then - # strip out Arkime and NetBox links from dashboards' navigation pane when doing Kibana import (idaholab/Malcolm#286) - sed -i 's/ \\\\n\[↪ NetBox\](\/netbox\/) \\\\n\[↪ Arkime\](\/arkime)//' "$i" - # take care of a few other substitutions - sed -i 's/opensearchDashboardsAddFilter/kibanaAddFilter/g' "$i" - fi - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/import?force=true" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" - done - rm -rf "${DASHBOARDS_IMPORT_DIR}" - - # beats will no longer import its dashboards into OpenSearch - # (see opensearch-project/OpenSearch-Dashboards#656 and - # opensearch-project/OpenSearch-Dashboards#831). As such, we're going to - # manually add load our dashboards in /opt/dashboards/beats as well. - BEATS_DASHBOARDS_IMPORT_DIR="$(mktemp -d -t beats-XXXXXX)" - rsync -a /opt/dashboards/beats/ "$BEATS_DASHBOARDS_IMPORT_DIR"/ - DoReplacersForDir "$BEATS_DASHBOARDS_IMPORT_DIR" - for i in "${BEATS_DASHBOARDS_IMPORT_DIR}"/*.json; do - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/import?force=true" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" - done - rm -rf "${BEATS_DASHBOARDS_IMPORT_DIR}" - - echo "$DATASTORE_TYPE Dashboards saved objects import complete!" - - if [[ "$DATASTORE_TYPE" == "opensearch" ]]; then - # some features and tweaks like anomaly detection, alerting, etc. 
only exist in opensearch - - # set dark theme (or not) - [[ "$DARK_MODE" == "true" ]] && DARK_MODE_ARG='{"value":true}' || DARK_MODE_ARG='{"value":false}' - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/theme:darkMode" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "$DARK_MODE_ARG" - - # set default dashboard - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/defaultRoute" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "{\"value\":\"/app/dashboards#/view/${DEFAULT_DASHBOARD}\"}" - - # set default query time range - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d \ - '{"changes":{"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"}"}}' - - # turn off telemetry - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/telemetry/v2/optIn" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d '{"enabled":false}' - - # pin filters by default - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/filters:pinnedByDefault" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d '{"value":true}' - - # enable in-session storage - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/state:storeInSessionStorage" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d '{"value":true}' - - # before we go on to create the anomaly detectors, we need to wait for actual network log documents - /data/opensearch_status.sh -w >/dev/null 2>&1 - sleep 60 - - echo "Creating $DATASTORE_TYPE anomaly detectors..." - - # Create anomaly detectors here - ANOMALY_IMPORT_DIR="$(mktemp -d -t anomaly-XXXXXX)" - rsync -a /opt/anomaly_detectors/ "$ANOMALY_IMPORT_DIR"/ - DoReplacersForDir "$ANOMALY_IMPORT_DIR" - for i in "${ANOMALY_IMPORT_DIR}"/*.json; do - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" - done - rm -rf "${ANOMALY_IMPORT_DIR}" - - # trigger a start/stop for the dummy detector to make sure the .opendistro-anomaly-detection-state index gets created - # see: - # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/109 - # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/155 - # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/156 - # - https://discuss.opendistrocommunity.dev/t/errors-opening-anomaly-detection-plugin-for-dashboards-after-creation-via-api/7711 - set +e - DUMMY_DETECTOR_ID="" - until [[ -n "$DUMMY_DETECTOR_ID" ]]; do - sleep 5 - DUMMY_DETECTOR_ID="$(curl "${CURL_CONFIG_PARAMS[@]}" -L --fail --silent --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/_search" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "{ \"query\": { \"match\": { \"name\": \"$DUMMY_DETECTOR_NAME\" } } }" | jq '.. | ._id? 
// empty' 2>/dev/null | head -n 1 | tr -d '"')" - done - set -e - if [[ -n "$DUMMY_DETECTOR_ID" ]]; then - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID/_start" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' - sleep 10 - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID/_stop" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' - sleep 10 - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XDELETE "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' - fi - - echo "$DATASTORE_TYPE anomaly detectors creation complete!" - - echo "Creating $DATASTORE_TYPE alerting objects..." - - # Create notification/alerting objects here - - # notification channels - for i in /opt/notifications/channels/*.json; do - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_notifications/configs" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" - done - - # monitors - ALERTING_IMPORT_DIR="$(mktemp -d -t alerting-XXXXXX)" - rsync -a /opt/alerting/monitors/ "$ALERTING_IMPORT_DIR"/ - DoReplacersForDir "$ALERTING_IMPORT_DIR" - for i in "${ALERTING_IMPORT_DIR}"/*.json; do - curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_alerting/monitors" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" - done - rm -rf "${ALERTING_IMPORT_DIR}" - - echo "$DATASTORE_TYPE alerting objects creation complete!" - - fi # DATASTORE_TYPE == opensearch - fi # stuff to only do for primary - fi # index pattern not already created check - fi # dashboards is running - done # primary vs. secondary -fi # CREATE_OS_ARKIME_SESSION_INDEX is true diff --git a/dashboards/scripts/docker_entrypoint.sh b/dashboards/scripts/docker_entrypoint.sh index 38c054e10..baa386eb1 100755 --- a/dashboards/scripts/docker_entrypoint.sh +++ b/dashboards/scripts/docker_entrypoint.sh @@ -42,5 +42,7 @@ if [[ -f "$ORIG_YML" ]]; then chmod 600 "$FINAL_YML" fi +rm -f /tmp/shared-objects-created + # start the default dashboards entrypoint exec "$@" diff --git a/dashboards/scripts/shared-object-creation.sh b/dashboards/scripts/shared-object-creation.sh new file mode 100755 index 000000000..8abf9b6cd --- /dev/null +++ b/dashboards/scripts/shared-object-creation.sh @@ -0,0 +1,545 @@ +#!/bin/bash + +# Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. 
+ +set -euo pipefail +shopt -s nocasematch + +DASHB_URL=${DASHBOARDS_URL:-"http://dashboards:5601/dashboards"} +INDEX_PATTERN=${MALCOLM_NETWORK_INDEX_PATTERN:-"arkime_sessions3-*"} +INDEX_TIME_FIELD=${MALCOLM_NETWORK_INDEX_TIME_FIELD:-"firstPacket"} +OTHER_INDEX_PATTERN=${MALCOLM_OTHER_INDEX_PATTERN:-"malcolm_beats_*"} +OTHER_INDEX_TIME_FIELD=${MALCOLM_OTHER_INDEX_TIME_FIELD:-"@timestamp"} +DUMMY_DETECTOR_NAME=${DUMMY_DETECTOR_NAME:-"malcolm_init_dummy"} +DARK_MODE=${DASHBOARDS_DARKMODE:-"true"} +DASHBOARDS_PREFIX=${DASHBOARDS_PREFIX:-""} +# trim leading and trailing spaces and remove characters that need JSON-escaping from DASHBOARDS_PREFIX +DASHBOARDS_PREFIX="${DASHBOARDS_PREFIX#"${DASHBOARDS_PREFIX%%[![:space:]]*}"}" +DASHBOARDS_PREFIX="${DASHBOARDS_PREFIX%"${DASHBOARDS_PREFIX##*[![:space:]]}"}" +DASHBOARDS_PREFIX="$(echo "$DASHBOARDS_PREFIX" | tr -d '"\\')" + +MALCOLM_TEMPLATES_DIR="/opt/templates" +MALCOLM_TEMPLATE_FILE_ORIG="$MALCOLM_TEMPLATES_DIR/malcolm_template.json" +MALCOLM_TEMPLATE_FILE="/data/init/malcolm_template.json" +DEFAULT_DASHBOARD=${OPENSEARCH_DEFAULT_DASHBOARD:-"0ad3d7c2-3441-485e-9dfe-dbb22e84e576"} + +ISM_SNAPSHOT_REPO=${ISM_SNAPSHOT_REPO:-"logs"} +ISM_SNAPSHOT_COMPRESSED=${ISM_SNAPSHOT_COMPRESSED:-"false"} + +OPENSEARCH_PRIMARY=${OPENSEARCH_PRIMARY:-"opensearch-local"} +OPENSEARCH_SECONDARY=${OPENSEARCH_SECONDARY:-""} + +STARTUP_IMPORT_PERFORMED_FILE=/tmp/shared-objects-created + +function DoReplacersInFile() { + # Index pattern and time field name may be specified via environment variable, but need + # to be reflected in dashboards, templates, anomaly detectors, etc. + # This function takes a file and performs that replacement. + REPLFILE="$1" + if [[ -n "$REPLFILE" ]] && [[ -f "$REPLFILE" ]]; then + sed -i "s/MALCOLM_NETWORK_INDEX_PATTERN_REPLACER/${INDEX_PATTERN}/g" "${REPLFILE}" || true + sed -i "s/MALCOLM_NETWORK_INDEX_TIME_FIELD_REPLACER/${INDEX_TIME_FIELD}/g" "${REPLFILE}" || true + sed -i "s/MALCOLM_OTHER_INDEX_PATTERN_REPLACER/${OTHER_INDEX_PATTERN}/g" "${REPLFILE}" || true + sed -i "s/MALCOLM_OTHER_INDEX_TIME_FIELD_REPLACER/${OTHER_INDEX_TIME_FIELD}/g" "${REPLFILE}" || true + fi +} + +function DoReplacersForDir() { + REPLDIR="$1" + if [[ -n "$REPLDIR" ]] && [[ -d "$REPLDIR" ]]; then + while IFS= read -r fname; do + DoReplacersInFile "$fname" + done < <( find "$REPLDIR"/ -type f 2>/dev/null ) + fi +} + +# store in an associative array the id, title, and .updated_at timestamp of a JSON file representing a dashboard +# arguments: +# 1 - the name of an associative array hash into which to insert the data +# 2 - the filename of the JSON file to check +# 3 - if the timestamp is not found, the fallback timestamp to use +function GetDashboardJsonInfo() { + local -n RESULT_HASH=$1 + local JSON_FILE_TO_IMPORT="$2" + local FALLBACK_TIMESTAMP="$3" + + DASHBOARD_TO_IMPORT_BASE="$(basename "$JSON_FILE_TO_IMPORT")" + DASHBOARD_TO_IMPORT_ID= + DASHBOARD_TO_IMPORT_TITLE= + DASHBOARD_TO_IMPORT_TIMESTAMP= + + if [[ -f "$JSON_FILE_TO_IMPORT" ]]; then + set +e + DASHBOARD_TO_IMPORT_ID="$(jq -r '.objects[] | select(.type == "dashboard") | .id' < "$JSON_FILE_TO_IMPORT" 2>/dev/null | head -n 1)" + DASHBOARD_TO_IMPORT_TITLE="$(jq -r '.objects[] | select(.type == "dashboard") | .attributes.title' < "$JSON_FILE_TO_IMPORT" 2>/dev/null | head -n 1)" + DASHBOARD_TO_IMPORT_TIMESTAMP="$(jq -r '.objects[] | select(.type == "dashboard") | .updated_at' < "$JSON_FILE_TO_IMPORT" 2>/dev/null | sort | tail -n 1)" + set -e + fi + + ( [[ -z "${DASHBOARD_TO_IMPORT_ID}" ]] || [[ 
"${DASHBOARD_TO_IMPORT_ID}" == "null" ]] ) && DASHBOARD_TO_IMPORT_ID="${DASHBOARD_TO_IMPORT_BASE%.*}" + ( [[ -z "${DASHBOARD_TO_IMPORT_TITLE}" ]] || [[ "${DASHBOARD_TO_IMPORT_TITLE}" == "null" ]] ) && DASHBOARD_TO_IMPORT_TITLE="${DASHBOARD_TO_IMPORT_BASE%.*}" + ( [[ -z "${DASHBOARD_TO_IMPORT_TIMESTAMP}" ]] || [[ "${DASHBOARD_TO_IMPORT_TIMESTAMP}" == "null" ]] ) && DASHBOARD_TO_IMPORT_TIMESTAMP="$FALLBACK_TIMESTAMP" + + RESULT_HASH["id"]="${DASHBOARD_TO_IMPORT_ID}" + RESULT_HASH["title"]="${DASHBOARD_TO_IMPORT_TITLE}" + RESULT_HASH["timestamp"]="${DASHBOARD_TO_IMPORT_TIMESTAMP}" +} + +# is the argument to automatically create this index enabled? +if [[ "${CREATE_OS_ARKIME_SESSION_INDEX:-true}" = "true" ]] ; then + + # give OpenSearch time to start and Arkime to get its own template created before configuring dashboards + /data/opensearch_status.sh -l arkime_sessions3_template >/dev/null 2>&1 + + CURRENT_ISO_UNIX_SECS="$(date -u +%s)" + CURRENT_ISO_TIMESTAMP="$(date -u +"%Y-%m-%dT%H:%M:%SZ" -d@${CURRENT_ISO_UNIX_SECS} | sed "s/Z$/.000Z/")" + EPOCH_ISO_TIMESTAMP="$(date -u +"%Y-%m-%dT%H:%M:%SZ" -d @0 | sed "s/Z$/.000Z/")" + LAST_IMPORT_CHECK_TIME="$(stat -c %Y "${STARTUP_IMPORT_PERFORMED_FILE}" 2>/dev/null || echo '0')" + + for LOOP in primary secondary; do + + if [[ "$LOOP" == "primary" ]]; then + OPENSEARCH_URL_TO_USE=${OPENSEARCH_URL:-"http://opensearch:9200"} + OPENSEARCH_CREDS_CONFIG_FILE_TO_USE=${OPENSEARCH_CREDS_CONFIG_FILE:-"/var/local/curlrc/.opensearch.primary.curlrc"} + if ( [[ "$OPENSEARCH_PRIMARY" == "opensearch-remote" ]] || [[ "$OPENSEARCH_PRIMARY" == "elasticsearch-remote" ]] ) && [[ -r "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" ]]; then + OPENSEARCH_LOCAL=false + CURL_CONFIG_PARAMS=( + --config + "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" + ) + else + OPENSEARCH_LOCAL=true + CURL_CONFIG_PARAMS=() + + fi + DATASTORE_TYPE="$(echo "$OPENSEARCH_PRIMARY" | cut -d- -f1)" + + elif [[ "$LOOP" == "secondary" ]] && ( [[ "$OPENSEARCH_SECONDARY" == "opensearch-remote" ]] || [[ "$OPENSEARCH_SECONDARY" == "elasticsearch-remote" ]] ) && [[ -n "${OPENSEARCH_SECONDARY_URL:-""}" ]]; then + OPENSEARCH_URL_TO_USE=$OPENSEARCH_SECONDARY_URL + OPENSEARCH_LOCAL=false + OPENSEARCH_CREDS_CONFIG_FILE_TO_USE=${OPENSEARCH_SECONDARY_CREDS_CONFIG_FILE:-"/var/local/curlrc/.opensearch.secondary.curlrc"} + if [[ -r "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" ]]; then + CURL_CONFIG_PARAMS=( + --config + "$OPENSEARCH_CREDS_CONFIG_FILE_TO_USE" + ) + else + CURL_CONFIG_PARAMS=() + fi + DATASTORE_TYPE="$(echo "$OPENSEARCH_SECONDARY" | cut -d- -f1)" + + else + continue + fi + [[ -z "$DATASTORE_TYPE" ]] && DATASTORE_TYPE="opensearch" + if [[ "$DATASTORE_TYPE" == "elasticsearch" ]]; then + DASHBOARDS_URI_PATH="kibana" + XSRF_HEADER="kbn-xsrf" + ECS_TEMPLATES_DIR=/opt/ecs-templates + else + DASHBOARDS_URI_PATH="opensearch-dashboards" + XSRF_HEADER="osd-xsrf" + ECS_TEMPLATES_DIR=/opt/ecs-templates-os + fi + + # is the Dashboards process server up and responding to requests? + if [[ "$LOOP" != "primary" ]] || curl "${CURL_CONFIG_PARAMS[@]}" --location --silent --output /dev/null --fail -XGET "$DASHB_URL/api/status" ; then + + # has it been a while since we did a full import check (or have we never done one)? + if [[ "$LOOP" != "primary" ]] || (( (${CURRENT_ISO_UNIX_SECS} - ${LAST_IMPORT_CHECK_TIME}) >= ${CREATE_OS_ARKIME_SESSION_INDEX_CHECK_INTERVAL_SEC:-86400} )); then + + echo "$DATASTORE_TYPE ($LOOP) is running at \"${OPENSEARCH_URL_TO_USE}\"!" 
+ + # register the repo name/path for opensearch snapshots (but don't count this as an unrecoverable failure) + if [[ "$LOOP" == "primary" ]] && [[ "$OPENSEARCH_LOCAL" == "true" ]]; then + echo "Registering index snapshot repository..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" -H "Accept: application/json" \ + -H "Content-type: application/json" \ + -XPUT --location --fail --silent --output /dev/null --show-error "$OPENSEARCH_URL_TO_USE/_snapshot/$ISM_SNAPSHOT_REPO" \ + -d "{ \"type\": \"fs\", \"settings\": { \"location\": \"$ISM_SNAPSHOT_REPO\", \"compress\": $ISM_SNAPSHOT_COMPRESSED } }" \ + || true + fi + + ############################################################################################################################# + # Templates + # - a sha256 sum of the combined templates is calculated and the templates are imported if the previously stored hash + # (if any) does not match the files we see currently. + + TEMPLATES_IMPORTED=false + TEMPLATES_IMPORT_DIR="$(mktemp -d -t templates-XXXXXX)" + rsync -a "$MALCOLM_TEMPLATES_DIR"/ "$TEMPLATES_IMPORT_DIR"/ + DoReplacersForDir "$TEMPLATES_IMPORT_DIR" + MALCOLM_TEMPLATE_FILE_ORIG_TMP="$(echo "$MALCOLM_TEMPLATE_FILE_ORIG" | sed "s@$MALCOLM_TEMPLATES_DIR@$TEMPLATES_IMPORT_DIR@")" + + # calculate combined SHA sum of all templates to save as _meta.hash to determine if + # we need to do this import (mostly useful for the secondary loop) + TEMPLATE_HASH="$(find "$ECS_TEMPLATES_DIR"/composable "$TEMPLATES_IMPORT_DIR" -type f -name "*.json" -size +2c 2>/dev/null | sort | xargs -r cat | sha256sum | awk '{print $1}')" + + # get the previous stored template hash (if any) to avoid importing if it's already been imported + set +e + TEMPLATE_HASH_OLD="$(curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent -XGET -H "Content-Type: application/json" "$OPENSEARCH_URL_TO_USE/_index_template/malcolm_template" 2>/dev/null | jq --raw-output '.index_templates[]|select(.name=="malcolm_template")|.index_template._meta.hash' 2>/dev/null)" + set -e + + # proceed only if the current template HASH doesn't match the previously imported one, or if there + # was an error calculating or storing either + if [[ "$TEMPLATE_HASH" != "$TEMPLATE_HASH_OLD" ]] || [[ -z "$TEMPLATE_HASH_OLD" ]] || [[ -z "$TEMPLATE_HASH" ]]; then + + if [[ -d "$ECS_TEMPLATES_DIR"/composable/component ]]; then + echo "Importing ECS composable templates..." + for i in "$ECS_TEMPLATES_DIR"/composable/component/*.json; do + TEMP_BASENAME="$(basename "$i")" + TEMP_FILENAME="${TEMP_BASENAME%.*}" + echo "Importing ECS composable template $TEMP_FILENAME ..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" \ + "$OPENSEARCH_URL_TO_USE/_component_template/ecs_$TEMP_FILENAME" -d "@$i" 2>&1 || true + done + fi + + if [[ -d "$TEMPLATES_IMPORT_DIR"/composable/component ]]; then + echo "Importing custom ECS composable templates..." + for i in "$TEMPLATES_IMPORT_DIR"/composable/component/*.json; do + TEMP_BASENAME="$(basename "$i")" + TEMP_FILENAME="${TEMP_BASENAME%.*}" + echo "Importing custom ECS composable template $TEMP_FILENAME ..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" \ + "$OPENSEARCH_URL_TO_USE/_component_template/custom_$TEMP_FILENAME" -d "@$i" 2>&1 || true + done + fi + + echo "Importing malcolm_template ($TEMPLATE_HASH)..." + + if [[ -f "$MALCOLM_TEMPLATE_FILE_ORIG_TMP" ]] && [[ !
-f "$MALCOLM_TEMPLATE_FILE" ]]; then + cp "$MALCOLM_TEMPLATE_FILE_ORIG_TMP" "$MALCOLM_TEMPLATE_FILE" + fi + + # store the TEMPLATE_HASH we calculated earlier as the _meta.hash for the malcolm template + MALCOLM_TEMPLATE_FILE_TEMP="$(mktemp)" + ( jq "._meta.hash=\"$TEMPLATE_HASH\"" "$MALCOLM_TEMPLATE_FILE" >"$MALCOLM_TEMPLATE_FILE_TEMP" 2>/dev/null ) && \ + [[ -s "$MALCOLM_TEMPLATE_FILE_TEMP" ]] && \ + cp -f "$MALCOLM_TEMPLATE_FILE_TEMP" "$MALCOLM_TEMPLATE_FILE" && \ + rm -f "$MALCOLM_TEMPLATE_FILE_TEMP" + + # load malcolm_template containing malcolm data source field type mappings (merged from /opt/templates/malcolm_template.json to /data/init/malcolm_template.json in dashboard-helpers on startup) + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" \ + "$OPENSEARCH_URL_TO_USE/_index_template/malcolm_template" -d "@$MALCOLM_TEMPLATE_FILE" 2>&1 + + # import other templates as well + for i in "$TEMPLATES_IMPORT_DIR"/*.json; do + TEMP_BASENAME="$(basename "$i")" + TEMP_FILENAME="${TEMP_BASENAME%.*}" + if [[ "$TEMP_FILENAME" != "malcolm_template" ]]; then + echo "Importing template \"$TEMP_FILENAME\"..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" \ + "$OPENSEARCH_URL_TO_USE/_index_template/$TEMP_FILENAME" -d "@$i" 2>&1 || true + fi + done + + TEMPLATES_IMPORTED=true + + else + echo "malcolm_template ($TEMPLATE_HASH) already exists ($LOOP) at \"${OPENSEARCH_URL_TO_USE}\"" + fi # TEMPLATE_HASH check + + # get info for creating the index patterns of "other" templates + OTHER_INDEX_PATTERNS=() + for i in "$TEMPLATES_IMPORT_DIR"/*.json; do + TEMP_BASENAME="$(basename "$i")" + TEMP_FILENAME="${TEMP_BASENAME%.*}" + if [[ "$TEMP_FILENAME" != "malcolm_template" ]]; then + for TEMPLATE_INDEX_PATTERN in $(jq -r '.index_patterns[]' "$i"); do + OTHER_INDEX_PATTERNS+=("$TEMPLATE_INDEX_PATTERN;$TEMPLATE_INDEX_PATTERN;@timestamp") + done + fi + done + + rm -rf "${TEMPLATES_IMPORT_DIR}" + + # end Templates + ############################################################################################################################# + + if [[ "$LOOP" == "primary" ]]; then + + ############################################################################################################################# + # Index pattern(s) + # - Only set overwrite=true if we actually updated the templates above, otherwise overwrite=false and fail silently + # if they already exist (http result code 409) + echo "Importing index pattern..." + [[ "${TEMPLATES_IMPORTED}" == "true" ]] && SHOW_IMPORT_ERROR="--show-error" || SHOW_IMPORT_ERROR= + + # Create index pattern + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null ${SHOW_IMPORT_ERROR} -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ + "$DASHB_URL/api/saved_objects/index-pattern/${INDEX_PATTERN}?overwrite=${TEMPLATES_IMPORTED}" \ + -d"{\"attributes\":{\"title\":\"$INDEX_PATTERN\",\"timeFieldName\":\"$INDEX_TIME_FIELD\"}}" 2>&1 || true + + echo "Setting default index pattern..." 
+ + # Make it the default index + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null ${SHOW_IMPORT_ERROR} -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ + "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/defaultIndex" \ + -d"{\"value\":\"$INDEX_PATTERN\"}" || true + + # import other index patterns from other templates discovered above + for i in ${OTHER_INDEX_PATTERNS[@]}; do + IDX_ID="$(echo "$i" | cut -d';' -f1)" + IDX_NAME="$(echo "$i" | cut -d';' -f2)" + IDX_TIME_FIELD="$(echo "$i" | cut -d';' -f3)" + echo "Creating index pattern \"$IDX_NAME\"..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --fail --silent --output /dev/null ${SHOW_IMPORT_ERROR} -XPOST -H "Content-Type: application/json" -H "$XSRF_HEADER: anything" \ + "$DASHB_URL/api/saved_objects/index-pattern/${IDX_ID}?overwrite=${TEMPLATES_IMPORTED}" \ + -d"{\"attributes\":{\"title\":\"$IDX_NAME\",\"timeFieldName\":\"$IDX_TIME_FIELD\"}}" 2>&1 || true + done + + # end Index pattern + ############################################################################################################################# + + echo "Importing $DATASTORE_TYPE Dashboards saved objects..." + + ############################################################################################################################# + # Dashboards + # - Dashboard JSON files have an .updated_at field with an ISO 8601-formatted date (e.g., "2024-04-29T15:49:16.000Z"). + # For each dashboard, query to see if the object exists and get the .updated_at field for the .type == "dashboard" + # objects. If the dashboard doesn't already exist, or if the file-to-be-imported date is newer than the old one, + # then import the dashboard. + + DASHBOARDS_IMPORT_DIR="$(mktemp -d -t dashboards-XXXXXX)" + rsync -a /opt/dashboards/ "$DASHBOARDS_IMPORT_DIR"/ + DoReplacersForDir "$DASHBOARDS_IMPORT_DIR"/ + for i in "${DASHBOARDS_IMPORT_DIR}"/*.json; do + + # get info about the dashboard to be imported + declare -A NEW_DASHBOARD_INFO + GetDashboardJsonInfo NEW_DASHBOARD_INFO "$i" "$CURRENT_ISO_TIMESTAMP" + + # get the old dashboard JSON and its info + curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent --show-error --output "${i}_old" \ + -XGET "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/export?dashboard=$DASHBOARD_TO_IMPORT_ID" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' || true + declare -A OLD_DASHBOARD_INFO + GetDashboardJsonInfo OLD_DASHBOARD_INFO "${i}_old" "$EPOCH_ISO_TIMESTAMP" + rm -f "${i}_old" + + # compare the timestamps and import if it's newer + if [[ "${NEW_DASHBOARD_INFO["timestamp"]}" > "${OLD_DASHBOARD_INFO["timestamp"]}" ]]; then + if [[ "$DATASTORE_TYPE" == "elasticsearch" ]]; then + # strip out Arkime and NetBox links from dashboards' navigation pane when doing Kibana import (idaholab/Malcolm#286) + sed -i 's/ \\\\n\[↪ NetBox\](\/netbox\/) \\\\n\[↪ Arkime\](\/arkime)//' "$i" + # take care of a few other substitutions + sed -i 's/opensearchDashboardsAddFilter/kibanaAddFilter/g' "$i" + fi + # prepend $DASHBOARDS_PREFIX to dashboards' titles + [[ -n "$DASHBOARDS_PREFIX" ]] && jq ".objects |= map(if .type == \"dashboard\" then .attributes.title |= \"${DASHBOARDS_PREFIX} \" + . else . end)" < "$i" | sponge "$i" + # import the dashboard + echo "Importing dashboard \"${NEW_DASHBOARD_INFO["title"]}\" (${NEW_DASHBOARD_INFO["timestamp"]} > ${OLD_DASHBOARD_INFO["timestamp"]}) ..." 
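+ # (the string comparison in the timestamp check above is lexicographic, which orders correctly for same-format ISO 8601 UTC timestamps;
+ # as an illustration only, with DASHBOARDS_PREFIX set to e.g. "Lab A", a dashboard titled "Overview" would be imported as "Lab A Overview")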
+ curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/import?force=true" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" + fi # timestamp check + done + rm -rf "${DASHBOARDS_IMPORT_DIR}" + + # beats will no longer import its dashboards into OpenSearch + # (see opensearch-project/OpenSearch-Dashboards#656 and + # opensearch-project/OpenSearch-Dashboards#831). As such, we're going to + # manually add load our dashboards in /opt/dashboards/beats as well. + BEATS_DASHBOARDS_IMPORT_DIR="$(mktemp -d -t beats-XXXXXX)" + rsync -a /opt/dashboards/beats/ "$BEATS_DASHBOARDS_IMPORT_DIR"/ + DoReplacersForDir "$BEATS_DASHBOARDS_IMPORT_DIR" + for i in "${BEATS_DASHBOARDS_IMPORT_DIR}"/*.json; do + + # get info about the dashboard to be imported + declare -A NEW_DASHBOARD_INFO + GetDashboardJsonInfo NEW_DASHBOARD_INFO "$i" "$CURRENT_ISO_TIMESTAMP" + + # get the old dashboard JSON and its info + curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent --show-error --output "${i}_old" \ + -XGET "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/export?dashboard=$DASHBOARD_TO_IMPORT_ID" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' || true + declare -A OLD_DASHBOARD_INFO + GetDashboardJsonInfo OLD_DASHBOARD_INFO "${i}_old" "$EPOCH_ISO_TIMESTAMP" + rm -f "${i}_old" + + # compare the timestamps and import if it's newer + if [[ "${NEW_DASHBOARD_INFO["timestamp"]}" > "${OLD_DASHBOARD_INFO["timestamp"]}" ]]; then + # prepend $DASHBOARDS_PREFIX to dashboards' titles + [[ -n "$DASHBOARDS_PREFIX" ]] && jq ".objects |= map(if .type == \"dashboard\" then .attributes.title |= \"${DASHBOARDS_PREFIX} \" + . else . end)" < "$i" | sponge "$i" + # import the dashboard + echo "Importing dashboard \"${NEW_DASHBOARD_INFO["title"]}\" (${NEW_DASHBOARD_INFO["timestamp"]} > ${OLD_DASHBOARD_INFO["timestamp"]}) ..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/import?force=true" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" + fi # timestamp check + done + rm -rf "${BEATS_DASHBOARDS_IMPORT_DIR}" + + echo "$DATASTORE_TYPE Dashboards saved objects import complete!" + + # end Dashboards + ############################################################################################################################# + + if [[ "$DATASTORE_TYPE" == "opensearch" ]]; then + # some features and tweaks like anomaly detection, alerting, etc. only exist in opensearch + + ############################################################################################################################# + # OpenSearch Tweaks + # - TODO: only do these if they've NEVER been done before? + echo "Updating $DATASTORE_TYPE UI settings..." 
+ + # set dark theme (or not) + [[ "$DARK_MODE" == "true" ]] && DARK_MODE_ARG='{"value":true}' || DARK_MODE_ARG='{"value":false}' + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/theme:darkMode" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "$DARK_MODE_ARG" + + # set default dashboard + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/defaultRoute" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d "{\"value\":\"/app/dashboards#/view/${DEFAULT_DASHBOARD}\"}" + + # set default query time range + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d '{"changes":{"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"}"}}' + + # turn off telemetry + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/telemetry/v2/optIn" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d '{"enabled":false}' + + # pin filters by default + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/filters:pinnedByDefault" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d '{"value":true}' + + # enable in-session storage + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/settings/state:storeInSessionStorage" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d '{"value":true}' + + echo "$DATASTORE_TYPE settings updates complete!" + + # end OpenSearch Tweaks + ############################################################################################################################# + + # before we go on to create the anomaly detectors, we need to wait for actual network log documents + /data/opensearch_status.sh -w >/dev/null 2>&1 + sleep 60 + + ############################################################################################################################# + # OpenSearch anomaly detectors + # - the .anomaly_detector.last_update_time field in the anomaly detector definition JSON is used to check + # whether or not the anomaly detector needs to be updated + + echo "Creating $DATASTORE_TYPE anomaly detectors..." 
+ + # Create anomaly detectors here + ANOMALY_IMPORT_DIR="$(mktemp -d -t anomaly-XXXXXX)" + rsync -a /opt/anomaly_detectors/ "$ANOMALY_IMPORT_DIR"/ + DoReplacersForDir "$ANOMALY_IMPORT_DIR" + for i in "${ANOMALY_IMPORT_DIR}"/*.json; do + # identify the name of the anomaly detector, and, if it already exists, its + # ID and previous update time, as well as the update time of the file to import + set +e + DETECTOR_NAME="$(jq -r '.name' 2>/dev/null < "$i")" + + DETECTOR_NEW_UPDATE_TIME="$(jq -r '.anomaly_detector.last_update_time' 2>/dev/null < "$i")" + ( [[ -z "${DETECTOR_NEW_UPDATE_TIME}" ]] || [[ "${DETECTOR_NEW_UPDATE_TIME}" == "null" ]] ) && DETECTOR_NEW_UPDATE_TIME=$CURRENT_ISO_UNIX_SECS + + DETECTOR_EXISTING_UPDATE_TIME=0 + DETECTOR_EXISTING_ID="$(curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/_search" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "{ \"query\": { \"match\": { \"name\": \"$DETECTOR_NAME\" } } }" | jq '.. | ._id? // empty' 2>/dev/null | head -n 1 | tr -d '"')" + if [[ -n "${DETECTOR_EXISTING_ID}" ]]; then + DETECTOR_EXISTING_UPDATE_TIME="$(curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent -XGET "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DETECTOR_EXISTING_ID" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' | jq -r '.anomaly_detector.last_update_time')" + ( [[ -z "${DETECTOR_EXISTING_UPDATE_TIME}" ]] || [[ "${DETECTOR_EXISTING_UPDATE_TIME}" == "null" ]] ) && DETECTOR_EXISTING_UPDATE_TIME=0 + fi + set -e + + # if the file to import is newer than the existing anomaly detector, then update it + if (( $DETECTOR_NEW_UPDATE_TIME > $DETECTOR_EXISTING_UPDATE_TIME )); then + [[ "$DETECTOR_NAME" != "$DUMMY_DETECTOR_NAME" ]] && \ + echo "Importing detector \"${DETECTOR_NAME}\" ($DETECTOR_NEW_UPDATE_TIME > $DETECTOR_EXISTING_UPDATE_TIME) ..." + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d "@$i" + fi + done + rm -rf "${ANOMALY_IMPORT_DIR}" + + # trigger a start/stop for the dummy detector to make sure the .opendistro-anomaly-detection-state index gets created + # see: + # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/109 + # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/155 + # - https://github.com/opensearch-project/anomaly-detection-dashboards-plugin/issues/156 + # - https://discuss.opendistrocommunity.dev/t/errors-opening-anomaly-detection-plugin-for-dashboards-after-creation-via-api/7711 + set +e + DUMMY_DETECTOR_ID="" + until [[ -n "$DUMMY_DETECTOR_ID" ]]; do + sleep 5 + DUMMY_DETECTOR_ID="$(curl "${CURL_CONFIG_PARAMS[@]}" --location --fail --silent -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/_search" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "{ \"query\": { \"match\": { \"name\": \"$DUMMY_DETECTOR_NAME\" } } }" | jq '.. | ._id? 
// empty' 2>/dev/null | head -n 1 | tr -d '"')" + done + set -e + if [[ -n "$DUMMY_DETECTOR_ID" ]]; then + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error -XPOST \ + "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID/_start" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' + sleep 10 + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID/_stop" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' + sleep 10 + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XDELETE "$OPENSEARCH_URL_TO_USE/_plugins/_anomaly_detection/detectors/$DUMMY_DETECTOR_ID" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' + fi + + echo "$DATASTORE_TYPE anomaly detectors creation complete!" + + # end OpenSearch anomaly detectors + ############################################################################################################################# + + ############################################################################################################################# + # OpenSearch alerting + # - always attempt to write the default Malcolm alerting objects, regardless of whether they exist or not + + echo "Creating $DATASTORE_TYPE alerting objects..." + + # Create notification/alerting objects here + + # notification channels + for i in /opt/notifications/channels/*.json; do + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_notifications/configs" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d "@$i" + done + + # monitors + ALERTING_IMPORT_DIR="$(mktemp -d -t alerting-XXXXXX)" + rsync -a /opt/alerting/monitors/ "$ALERTING_IMPORT_DIR"/ + DoReplacersForDir "$ALERTING_IMPORT_DIR" + for i in "${ALERTING_IMPORT_DIR}"/*.json; do + curl "${CURL_CONFIG_PARAMS[@]}" -w "\n" --location --silent --output /dev/null --show-error \ + -XPOST "$OPENSEARCH_URL_TO_USE/_plugins/_alerting/monitors" \ + -H "$XSRF_HEADER:true" -H 'Content-type:application/json' \ + -d "@$i" + done + rm -rf "${ALERTING_IMPORT_DIR}" + + echo "$DATASTORE_TYPE alerting objects creation complete!" + + # end OpenSearch alerting + ############################################################################################################################# + + fi # DATASTORE_TYPE == opensearch + fi # stuff to only do for primary + + touch "${STARTUP_IMPORT_PERFORMED_FILE}" + fi # LAST_IMPORT_CHECK_TIME interval check + + fi # dashboards is running + done # primary vs. 
secondary +fi # CREATE_OS_ARKIME_SESSION_INDEX is true diff --git a/dashboards/templates/composable/component/zeek.json b/dashboards/templates/composable/component/zeek.json index 76b20aafd..ea948f921 100644 --- a/dashboards/templates/composable/component/zeek.json +++ b/dashboards/templates/composable/component/zeek.json @@ -67,6 +67,7 @@ "zeek.files.extracted": { "type": "keyword" }, "zeek.files.extracted_cutoff": { "type": "keyword" }, "zeek.files.extracted_size": { "type": "long" }, + "zeek.files.extracted_uri": { "type": "keyword" }, "zeek.files.filename": { "type": "keyword", "ignore_above": 1024, "fields": { "text": { "type": "text" } } }, "zeek.files.ftime": { "type": "date" }, "zeek.files.local_orig": { "type": "keyword" }, diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 4640b8a3e..c1c5c66ba 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -1,7 +1,5 @@ # Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. -version: '3.7' - x-logging: &default-logging driver: local @@ -15,7 +13,7 @@ services: build: context: . dockerfile: Dockerfiles/opensearch.Dockerfile - image: ghcr.io/idaholab/malcolm/opensearch:24.03.1 + image: ghcr.io/idaholab/malcolm/opensearch:24.04.0 # Technically the "hedgehog" profile doesn't have OpenSearch, but in that case # OPENSEARCH_PRIMARY will be set to remote, which means the container will # start but not actually run OpenSearch. It's included in both profiles to @@ -60,7 +58,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards-helper.Dockerfile - image: ghcr.io/idaholab/malcolm/dashboards-helper:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards-helper:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -93,7 +91,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards.Dockerfile - image: ghcr.io/idaholab/malcolm/dashboards:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -124,7 +122,7 @@ services: build: context: . dockerfile: Dockerfiles/logstash.Dockerfile - image: ghcr.io/idaholab/malcolm/logstash-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/logstash-oss:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -178,7 +176,7 @@ services: build: context: . dockerfile: Dockerfiles/filebeat.Dockerfile - image: ghcr.io/idaholab/malcolm/filebeat-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/filebeat-oss:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -216,7 +214,7 @@ services: build: context: . dockerfile: Dockerfiles/arkime.Dockerfile - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -255,7 +253,7 @@ services: build: context: . dockerfile: Dockerfiles/arkime.Dockerfile - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -294,7 +292,7 @@ services: build: context: . dockerfile: Dockerfiles/zeek.Dockerfile - image: ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -330,7 +328,7 @@ services: build: context: . 
dockerfile: Dockerfiles/zeek.Dockerfile - image: ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -362,7 +360,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -393,7 +391,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -426,7 +424,7 @@ services: build: context: . dockerfile: Dockerfiles/file-monitor.Dockerfile - image: ghcr.io/idaholab/malcolm/file-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/file-monitor:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -458,7 +456,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-capture.Dockerfile - image: ghcr.io/idaholab/malcolm/pcap-capture:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-capture:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -490,7 +488,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-monitor.Dockerfile - image: ghcr.io/idaholab/malcolm/pcap-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-monitor:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -521,7 +519,7 @@ services: build: context: . dockerfile: Dockerfiles/file-upload.Dockerfile - image: ghcr.io/idaholab/malcolm/file-upload:24.03.1 + image: ghcr.io/idaholab/malcolm/file-upload:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -548,7 +546,7 @@ services: retries: 3 start_period: 60s htadmin: - image: ghcr.io/idaholab/malcolm/htadmin:24.03.1 + image: ghcr.io/idaholab/malcolm/htadmin:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -578,7 +576,7 @@ services: retries: 3 start_period: 60s freq: - image: ghcr.io/idaholab/malcolm/freq:24.03.1 + image: ghcr.io/idaholab/malcolm/freq:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -605,7 +603,7 @@ services: retries: 3 start_period: 60s netbox: - image: ghcr.io/idaholab/malcolm/netbox:24.03.1 + image: ghcr.io/idaholab/malcolm/netbox:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -642,7 +640,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - image: ghcr.io/idaholab/malcolm/postgresql:24.03.1 + image: ghcr.io/idaholab/malcolm/postgresql:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -671,7 +669,7 @@ services: retries: 3 start_period: 45s netbox-redis: - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -704,7 +702,7 @@ services: retries: 3 start_period: 45s netbox-redis-cache: - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -736,7 +734,7 @@ services: retries: 3 start_period: 45s api: - image: ghcr.io/idaholab/malcolm/api:24.03.1 + image: ghcr.io/idaholab/malcolm/api:24.04.0 profiles: ["malcolm"] logging: *default-logging build: @@ -769,7 +767,7 @@ services: build: context: . 
dockerfile: Dockerfiles/nginx.Dockerfile - image: ghcr.io/idaholab/malcolm/nginx-proxy:24.03.1 + image: ghcr.io/idaholab/malcolm/nginx-proxy:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" diff --git a/docker-compose.yml b/docker-compose.yml index c90d9eff9..0835fc6c7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,5 @@ # Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. -version: '3.7' - x-logging: &default-logging driver: local @@ -12,7 +10,7 @@ x-logging: services: opensearch: - image: ghcr.io/idaholab/malcolm/opensearch:24.03.1 + image: ghcr.io/idaholab/malcolm/opensearch:24.04.0 # Technically the "hedgehog" profile doesn't have OpenSearch, but in that case # OPENSEARCH_PRIMARY will be set to remote, which means the container will # start but not actually run OpenSearch. It's included in both profiles to @@ -54,7 +52,7 @@ services: retries: 3 start_period: 180s dashboards-helper: - image: ghcr.io/idaholab/malcolm/dashboards-helper:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards-helper:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -84,7 +82,7 @@ services: retries: 3 start_period: 30s dashboards: - image: ghcr.io/idaholab/malcolm/dashboards:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -112,7 +110,7 @@ services: retries: 3 start_period: 210s logstash: - image: ghcr.io/idaholab/malcolm/logstash-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/logstash-oss:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -159,7 +157,7 @@ services: retries: 3 start_period: 600s filebeat: - image: ghcr.io/idaholab/malcolm/filebeat-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/filebeat-oss:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -194,7 +192,7 @@ services: retries: 3 start_period: 60s arkime: - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -228,7 +226,7 @@ services: retries: 3 start_period: 210s arkime-live: - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -261,7 +259,7 @@ services: - ./arkime/rules:/opt/arkime/rules:ro - ./pcap:/data/pcap zeek: - image: ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -293,7 +291,7 @@ services: retries: 3 start_period: 60s zeek-live: - image: ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -321,7 +319,7 @@ services: - ./zeek/intel:/opt/zeek/share/zeek/site/intel - ./zeek/custom:/opt/zeek/share/zeek/site/custom:ro suricata: - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -349,7 +347,7 @@ services: retries: 3 start_period: 120s suricata-live: - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -379,7 +377,7 @@ services: - ./suricata/rules:/opt/suricata/rules:ro - 
./suricata/include-configs:/opt/suricata/include-configs:ro file-monitor: - image: ghcr.io/idaholab/malcolm/file-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/file-monitor:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -408,7 +406,7 @@ services: retries: 3 start_period: 60s pcap-capture: - image: ghcr.io/idaholab/malcolm/pcap-capture:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-capture:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -437,7 +435,7 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./pcap/upload:/pcap pcap-monitor: - image: ghcr.io/idaholab/malcolm/pcap-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-monitor:24.04.0 profiles: ["malcolm", "hedgehog"] logging: *default-logging restart: "no" @@ -465,7 +463,7 @@ services: retries: 3 start_period: 90s upload: - image: ghcr.io/idaholab/malcolm/file-upload:24.03.1 + image: ghcr.io/idaholab/malcolm/file-upload:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -492,7 +490,7 @@ services: retries: 3 start_period: 60s htadmin: - image: ghcr.io/idaholab/malcolm/htadmin:24.03.1 + image: ghcr.io/idaholab/malcolm/htadmin:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -519,7 +517,7 @@ services: retries: 3 start_period: 60s freq: - image: ghcr.io/idaholab/malcolm/freq:24.03.1 + image: ghcr.io/idaholab/malcolm/freq:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -543,7 +541,7 @@ services: retries: 3 start_period: 60s netbox: - image: ghcr.io/idaholab/malcolm/netbox:24.03.1 + image: ghcr.io/idaholab/malcolm/netbox:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -577,7 +575,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - image: ghcr.io/idaholab/malcolm/postgresql:24.03.1 + image: ghcr.io/idaholab/malcolm/postgresql:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -603,7 +601,7 @@ services: retries: 3 start_period: 45s netbox-redis: - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -633,7 +631,7 @@ services: retries: 3 start_period: 45s netbox-redis-cache: - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" @@ -662,7 +660,7 @@ services: retries: 3 start_period: 45s api: - image: ghcr.io/idaholab/malcolm/api:24.03.1 + image: ghcr.io/idaholab/malcolm/api:24.04.0 profiles: ["malcolm"] logging: *default-logging command: gunicorn --bind 0:5000 manage:app @@ -689,7 +687,7 @@ services: retries: 3 start_period: 60s nginx-proxy: - image: ghcr.io/idaholab/malcolm/nginx-proxy:24.03.1 + image: ghcr.io/idaholab/malcolm/nginx-proxy:24.04.0 profiles: ["malcolm"] logging: *default-logging restart: "no" diff --git a/docs/README.md b/docs/README.md index 4db0c10f3..82b5817f7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -30,6 +30,7 @@ Malcolm can also easily be deployed locally on an ordinary consumer workstation - [Malcolm Configuration](malcolm-config.md#ConfigAndTuning) + [Environment variable files](malcolm-config.md#MalcolmConfigEnvVars) + [Command-line arguments](malcolm-config.md#CommandLineConfig) + + [Managing disk usage](malcolm-config.md#DiskUsage) - [Configure authentication](authsetup.md#AuthSetup) + [Local account management](authsetup.md#AuthBasicAccountManagement) + [Lightweight Directory Access 
Protocol (LDAP) authentication](authsetup.md#AuthLDAP) @@ -41,6 +42,7 @@ Malcolm can also easily be deployed locally on an ordinary consumer workstation + [Linux host system configuration](host-config-linux.md#HostSystemConfigLinux) + [macOS host system configuration](host-config-macos.md#HostSystemConfigMac) + [Windows host system configuration](host-config-windows.md#HostSystemConfigWindows) + - [Managing disk usage](malcolm-config.md#DiskUsage) * [Running Malcolm](running.md#Running) - [OpenSearch and Elasticsearch instances](opensearch-instances.md#OpenSearchInstance) + [Authentication and authorization for remote data store clusters](opensearch-instances.md#OpenSearchAuth) diff --git a/docs/api-version.md b/docs/api-version.md index ea8ed3153..c52793da0 100644 --- a/docs/api-version.md +++ b/docs/api-version.md @@ -47,6 +47,6 @@ Returns version information about Malcolm and version/[health](https://opensearc } }, "sha": "77574975", - "version": "24.03.1" + "version": "24.04.0" } ``` diff --git a/docs/asset-interaction-analysis.md b/docs/asset-interaction-analysis.md index 6f7b451f0..aa75820f8 100644 --- a/docs/asset-interaction-analysis.md +++ b/docs/asset-interaction-analysis.md @@ -98,7 +98,7 @@ If the `NETBOX_AUTO_POPULATE` [environment variable in `./config/netbox-common.e However, careful consideration should be made before enabling this feature: the purpose of an asset management system is to document the intended state of a network: with Malcolm configured to populate NetBox with the live network state, a network misconfiguration fault could result in an **incorrect documented configuration**. -Devices created using this autopopulate method will have their `status` field set to `staged`. It is recommended that users periodically review automatically-created devices for correctness and to fill in known details that couldn't be determined from network traffic. For example, the `manufacturer` field for automatically-created devices will be set based on the organizational unique identifier (OUI) determined from the first three bytes of the observed MAC address, which may not be accurate if the device's traffic was observed across a router. If possible, observed hostnames will be used in the naming of the automatically-created devices, falling back to the device manufacturer otherwise (e.g., `MYHOSTNAME @ 10.10.0.123` vs. `Schweitzer Engineering @ 10.10.0.123`). +Devices created using this autopopulate method will include a `tags` value of `Autopopulated`. It is recommended that users periodically review automatically-created devices for correctness and to fill in known details that couldn't be determined from network traffic. For example, the `manufacturer` field for automatically-created devices will be set based on the organizational unique identifier (OUI) determined from the first three bytes of the observed MAC address, which may not be accurate if the device's traffic was observed across a router. If possible, observed hostnames (extracted from logs that provide a mapping of IP address to host name, such as Zeek's `dns.log`, `ntlm.log`, and `dhcp.log`) will be used in the naming of the automatically-created devices, falling back to the device manufacturer otherwise (e.g., `MYHOSTNAME` vs. `Schweitzer Engineering @ 10.10.0.123`). 
Since device autocreation is based on IP address, information about network segments (IP [prefixes](https://docs.netbox.dev/en/stable/models/ipam/prefix/)) must be first [manually specified](#NetBoxPopManual) in NetBox in order for devices to be automatically populated. Users should populate the `description` field in the NetBox IPAM Prefixes data model to specify a name to be used for NetBox network segment autopopulation and enrichment, otherwise the IP prefix itself will be used. diff --git a/docs/contributing-pcap.md b/docs/contributing-pcap.md index abb804904..9f32ffd0b 100644 --- a/docs/contributing-pcap.md +++ b/docs/contributing-pcap.md @@ -1,6 +1,6 @@ # PCAP processors -When a PCAP is uploaded (either through Malcolm's [upload web interface](upload.md#Upload) or just copied manually into the `./pcap/upload` directory), the `pcap-monitor` container has a script that picks up those PCAP files and publishes to a [ZeroMQ](https://zeromq.org/) topic that can be subscribed to by any other process that wants to analyze that PCAP. In Malcolm (at the time of the [v24.03.1 release]({{ site.github.repository_url }}/releases/tag/v24.03.1)), there are three such ZeroMQ topics: the `zeek`, `suricata` and `arkime` containers. These actually share the [same script]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/pcap_processor.py) to run the PCAP through Zeek, Suricata, and Arkime, respectively. For an example to follow, the `zeek` container is the less complicated of the two. To integrate a new PCAP processing tool into Malcolm (named `cooltool` for this example) the process would entail: +When a PCAP is uploaded (either through Malcolm's [upload web interface](upload.md#Upload) or just copied manually into the `./pcap/upload` directory), the `pcap-monitor` container has a script that picks up those PCAP files and publishes to a [ZeroMQ](https://zeromq.org/) topic that can be subscribed to by any other process that wants to analyze that PCAP. In Malcolm (at the time of the [v24.04.0 release]({{ site.github.repository_url }}/releases/tag/v24.04.0)), there are three such ZeroMQ topics: the `zeek`, `suricata` and `arkime` containers. These actually share the [same script]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/pcap_processor.py) to run the PCAP through Zeek, Suricata, and Arkime, respectively. For an example to follow, the `zeek` container is the less complicated of the two. To integrate a new PCAP processing tool into Malcolm (named `cooltool` for this example) the process would entail: 1. Define the service as instructed in the [Adding a new service](contributing-new-image.md#NewImage) section * Note how the existing `zeek` and `arkime` services use [bind mounts](contributing-local-modifications.md#Bind) to access the local `./pcap` directory diff --git a/docs/file-scanning.md b/docs/file-scanning.md index 06cc4150c..884c481dd 100644 --- a/docs/file-scanning.md +++ b/docs/file-scanning.md @@ -52,14 +52,14 @@ The `EXTRACTED_FILE_HTTP_SERVER_…` [environment variables in `zeek.env` and `z The files extracted by Zeek and the data about those files can be accessed through several of Malcolm's user interfaces. -* The [Files dashboard](dashboards.md#PrebuiltVisualizations) summarizes the file transfers observed in network traffic: +* The [Files dashboard](dashboards.md#PrebuiltVisualizations) summarizes the file transfers observed in network traffic. 
The **Extracted File Downloads** table provides download links for the extracted files matching the currently applied filters. Note that the presence of these links doesn't necessarily imply that the files they represent are available: depending on factors such as file preservation settings (above) and retention policies, files that were extracted and scanned may no longer be available. When this is the case, clicking one of the file download links will result in a "file not found" error. If one of these links refers to a file that was extracted and scanned on a [Hedgehog Linux](hedgehog.md) network sensor, Malcolm must be able to communicate with that sensor in order to retrieve and download the file. ![The files dashboard displays metrics about the files transferred over the network](./images/screenshots/dashboards_files_source.png) -* Viewing logs from Zeek's `files.log` (e.g., `event.provider == zeek && event.dataset == files`), the Arkime [session](arkime.md#ArkimeSessions) detail's **Extracted Filename** field can be clicked for a context menu item to **Download** the extracted file, if it was preserved as described above. +* Viewing logs from Zeek's `files.log` (e.g., `event.provider == zeek && event.dataset == files`), the Arkime [session](arkime.md#ArkimeSessions) detail's **Extracted Filename URL** field can be clicked for a context menu item to download the extracted file, if it was preserved as described above. ![Arkime's session details for files.log entries](./images/screenshots/arkime_sessions_files_log_dl.png) -* Malcolm provides an extracted files directory listing to browse and download Zeek-extracted files. This interface is available at at **https://localhost/extracted-files/** if connecting locally. The Zeek `uid` and `fuid` values associated with these files and the sessions from which they were extracted are listed in the **IDs** column as filter links back into Dashboards. +* Malcolm provides an extracted files directory listing to browse and download Zeek-extracted files. This interface is available at **https://localhost/extracted-files/** if connecting locally. The Zeek `uid` and `fuid` values associated with these files and the sessions from which they were extracted are listed in the **IDs** column as filter links back into Dashboards. Similarly, files extracted and preserved on a [Hedgehog Linux](hedgehog.md) network sensor can be accessed at **https://localhost/hh-extracted-files/X.X.X.X/**, where **X.X.X.X** represents the IP address or hostname of the sensor (e.g., `https://localhost/hh-extracted-files/192.168.122.57/` if the sensor's IP address were 192.168.122.57). ![The extracted files directory interface](./images/screenshots/extracted_files_dl_ui.png) diff --git a/docs/hedgehog-config-zeek-intel.md b/docs/hedgehog-config-zeek-intel.md index f817ae84e..e91d24ddc 100644 --- a/docs/hedgehog-config-zeek-intel.md +++ b/docs/hedgehog-config-zeek-intel.md @@ -1,9 +1,7 @@ -# Zeek Intelligence Framework +# Zeek Intelligence Framework -To quote Zeek's [Intelligence Framework](https://docs.zeek.org/en/master/frameworks/intel.html) documentation, "The goals of Zeek’s Intelligence Framework are to consume intelligence data, make it available for matching, and provide infrastructure to improve performance and memory utilization. Data in the Intelligence Framework is an atomic piece of intelligence such as an IP address or an e-mail address.
This atomic data will be packed with metadata such as a freeform source field, a freeform descriptive field, and a URL which might lead to more information about the specific item." Zeek [intelligence](https://docs.zeek.org/en/master/scripts/base/frameworks/intel/main.zeek.html) [indicator types](https://docs.zeek.org/en/master/scripts/base/frameworks/intel/main.zeek.html#type-Intel::Type) include IP addresses, URLs, file names, hashes, email addresses, and more. +Hedgehog Linux's management of intel files is identical to what is done by a Malcolm instance's Zeek docker containers. Please see [Zeek Intelligence Framework](zeek-intel.md#ZeekIntel) in the main Malcolm documentation for more information. For Hedgehog Linux, the only deviations from what is outlined in that document are that some of the file locations are different than they are on a Malcolm instance: -Hedgehog Linux doesn't come bundled with intelligence files from any particular feed, but they can be easily included into your local instance. On [startup]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/zeek_intel_setup.sh), The subdirectories under `/opt/sensor/sensor_ctl/zeek/intel` which contain their own `__load__.zeek` file will be `@load`-ed as-is, while subdirectories containing "loose" intelligence files will be [loaded](https://docs.zeek.org/en/master/frameworks/intel.html#loading-intelligence) automatically with a `redef Intel::read_files` directive. - -Note that Hedgehog Linux does not manage updates for these intelligence files. You should use the update mechanism suggested by your feeds' maintainers to keep them up to date. Adding and deleting intelligence files under this directory will take effect upon restarting Zeek. - -See [Zeek Intelligence Framework](zeek-intel.md#ZeekIntel) in the main Malcolm documentation for more information. +* the `ZEEK_INTEL_REFRESH_CRON_EXPRESSION` environment variable can be found in `/opt/sensor/sensor_ctl/control_vars.conf` +* the `./zeek/intel` directory is `/opt/sensor/sensor_ctl/zeek/intel` +* to manually refresh the Zeek intel files instead of waiting for the interval specified by `ZEEK_INTEL_REFRESH_CRON_EXPRESSION`, run `/opt/zeek/bin/zeek_intel_setup.sh true` \ No newline at end of file diff --git a/docs/hedgehog-iso-build.md b/docs/hedgehog-iso-build.md index 321bfa257..c0fb157fe 100644 --- a/docs/hedgehog-iso-build.md +++ b/docs/hedgehog-iso-build.md @@ -29,7 +29,7 @@ Building the ISO may take 90 minutes or more depending on your system. 
As the bu ``` … -Finished, created "/sensor-build/hedgehog-24.03.1.iso" … ``` diff --git a/docs/hedgehog-upgrade.md b/docs/hedgehog-upgrade.md index 3b3cec849..fa98816be 100644 --- a/docs/hedgehog-upgrade.md +++ b/docs/hedgehog-upgrade.md @@ -291,7 +291,7 @@ sensor@hedgehog:~$ cd /opt sensor@hedgehog:opt$ diff sensor_upgrade_backup_2020-05-08/sensor_ctl/control_vars.conf sensor/sensor_ctl/control_vars.conf 1,2c1,2 < export CAPTURE_INTERFACE=enp0s3 -< export CAPTURE_FILTER="not port 5044 and not port 5601 and not port 8005 and not port 9200 and not port 9600" +< export CAPTURE_FILTER="not port 5044 and not port 5601 and not port 8005 and not port 8006 and not port 9200 and not port 9600" --- > export CAPTURE_INTERFACE=xxxx > export CAPTURE_FILTER="" diff --git a/docs/hedgehog.md b/docs/hedgehog.md index 94880fa51..dcee88c0e 100644 --- a/docs/hedgehog.md +++ b/docs/hedgehog.md @@ -29,7 +29,9 @@ Hedgehog Linux is a Debian-based operating system built to * [ssl-client-receive](malcolm-hedgehog-e2e-iso-install.md#HedgehogGetCerts): Receive client SSL files for filebeat from Malcolm * [filebeat](malcolm-hedgehog-e2e-iso-install.md#Hedgehogfilebeat): Zeek and Suricata log forwarding * [miscbeat](malcolm-hedgehog-e2e-iso-install.md#Hedgehogmiscbeat): System metrics forwarding + * [acl-configure](malcolm-hedgehog-e2e-iso-install.md#HedgehogACL): Configure ACL for artifact reachback from Malcolm - [Autostart services](malcolm-hedgehog-e2e-iso-install.md#HedgehogConfigAutostart) + - [Managing disk usage](malcolm-hedgehog-e2e-iso-install.md#HedgehogDiskUsage) + [Zeek Intelligence Framework](hedgehog-config-zeek-intel.md#HedgehogZeekIntel) * [Appendix A - Generating the ISO](hedgehog-iso-build.md#HedgehogISOBuild) * [Appendix B - Generating a Raspberry Pi Image](hedgehog-raspi-build.md#HedgehogRaspiBuild) diff --git a/docs/host-config-windows.md b/docs/host-config-windows.md index e14ffb561..b7bfc7213 100644 --- a/docs/host-config-windows.md +++ b/docs/host-config-windows.md @@ -1,16 +1,19 @@ # Windows host system configuration -## Installing and configuring Docker Desktop for Windows +Installing and configuring Docker to run under the Windows Subsystem for Linux (WSL) must be done manually, rather than through the `install.py` script as with Linux and macOS. -Installing and configuring [Docker to run under Windows](https://docs.docker.com/desktop/windows/wsl/) must be done manually, rather than through the `install.py` script as with Linux and macOS. +1. Make sure your Windows 10 or Windows 11 system is up-to-date with the current Windows cumulative update. +1. Open PowerShell or Windows Command Prompt in administrator mode by right-clicking the icon in the Start Menu and selecting **Run as administrator**. +1. Enter the command [`wsl --install`](https://learn.microsoft.com/en-us/windows/wsl/install) and wait for the installation to finish. +1. Reboot the system. +1. Upon rebooting, the Linux terminal will open automatically with **Installing, this may take a few minutes...**. Wait for this process to complete. +1. As prompted, create a default UNIX user account by providing a username and password. +1. Install Docker by running `curl -fsSL https://get.docker.com -o get-docker.sh` followed by `sudo sh get-docker.sh`. +1. Add the user account you just created to the `docker` group by running `sudo usermod -a -G docker username`, replacing `username` with the username you created before. +1.
Verify Docker and Docker Compose are correctly installed by running `docker --version` and `docker compose version`. +1. If running Ubuntu 22.04 LTS, to ensure container networking works correctly, run `sudo update-alternatives --config iptables` and select the option for `iptables-legacy`. +1. Restart WSL by rebooting the system. +1. Upon rebooting, open the Start Menu and select the name of the Linux distribution you installed (**Ubuntu** is the default). +1. Continue with the Malcolm installation and configuration as described in the [**Quick start**](quickstart.md#QuickStart) documentation or illustrated with the **[Installation example using Ubuntu 22.04 LTS](ubuntu-install-example.md#InstallationExample)**. -1. Be running Windows 10, Version 1903 or higher -1. Prepare your system and [install WSL](https://docs.microsoft.com/en-us/windows/wsl/install) and a Linux distribution by running `wsl --install -d Debian` in PowerShell as Administrator (these instructions are tested with Debian, but may work with other distributions) -1. Install Docker Desktop for Windows either by downloading the installer from the [official Docker site](https://docs.docker.com/desktop/install/windows-install/) or installing it through [chocolatey](https://chocolatey.org/packages/docker-desktop). -1. Follow the [Docker Desktop WSL 2 backend](https://docs.docker.com/desktop/windows/wsl/) instructions to finish configuration and review best practices -1. Reboot -1. Open the WSL distribution's terminal and run `docker info` to ensure Docker is running - -## Finish Malcolm's configuration - -Once Docker is installed, configured, and running as described in the previous section, run [`./scripts/configure`](malcolm-config.md#ConfigAndTuning) to finish configuration of the local Malcolm installation. Malcolm will be controlled and run from within your WSL distribution's terminal environment. \ No newline at end of file +Once the configuration is complete, Malcolm will be started and stopped from within your WSL distribution's terminal environment as described in [**Running Malcolm**](running.md). 
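For convenience, the Docker-related commands from the numbered list above can also be run back-to-back from the WSL terminal once the Linux user account has been created. A minimal sketch (assuming the default Ubuntu distribution; `"$USER"` stands in for the account created during setup, and the `update-alternatives` step applies only to Ubuntu 22.04 LTS):

```bash
# Install Docker using Docker's convenience script, as described in the list above
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh

# Allow the newly created account to run Docker without sudo
sudo usermod -a -G docker "$USER"

# Verify that Docker and Docker Compose are installed correctly
docker --version
docker compose version

# Ubuntu 22.04 LTS only: select iptables-legacy so container networking works,
# then restart WSL by rebooting the system
sudo update-alternatives --config iptables
```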
diff --git a/docs/images/hedgehog/images/arkime_confirm.png b/docs/images/hedgehog/images/arkime_confirm.png index 218734137..c0e6a738c 100644 Binary files a/docs/images/hedgehog/images/arkime_confirm.png and b/docs/images/hedgehog/images/arkime_confirm.png differ diff --git a/docs/images/hedgehog/images/autostarts.png b/docs/images/hedgehog/images/autostarts.png index fc77489a4..c6c23cfe6 100644 Binary files a/docs/images/hedgehog/images/autostarts.png and b/docs/images/hedgehog/images/autostarts.png differ diff --git a/docs/images/hedgehog/images/autostarts_confirm.png b/docs/images/hedgehog/images/autostarts_confirm.png index 5bdea7d0a..8436fff70 100644 Binary files a/docs/images/hedgehog/images/autostarts_confirm.png and b/docs/images/hedgehog/images/autostarts_confirm.png differ diff --git a/docs/images/hedgehog/images/file_server_zip.png b/docs/images/hedgehog/images/file_server_zip.png new file mode 100644 index 000000000..03d96e6ef Binary files /dev/null and b/docs/images/hedgehog/images/file_server_zip.png differ diff --git a/docs/images/hedgehog/images/forwarder_config.png b/docs/images/hedgehog/images/forwarder_config.png index b20139db3..41529a2a5 100644 Binary files a/docs/images/hedgehog/images/forwarder_config.png and b/docs/images/hedgehog/images/forwarder_config.png differ diff --git a/docs/images/hedgehog/images/htpdate_host.png b/docs/images/hedgehog/images/htpdate_host.png index e9597d1cb..709196966 100644 Binary files a/docs/images/hedgehog/images/htpdate_host.png and b/docs/images/hedgehog/images/htpdate_host.png differ diff --git a/docs/images/hedgehog/images/htpdate_setup.png b/docs/images/hedgehog/images/htpdate_setup.png index 337027768..99e494155 100644 Binary files a/docs/images/hedgehog/images/htpdate_setup.png and b/docs/images/hedgehog/images/htpdate_setup.png differ diff --git a/docs/images/hedgehog/images/htpdate_test.png b/docs/images/hedgehog/images/htpdate_test.png index 7e74f04d4..4d40857d7 100644 Binary files a/docs/images/hedgehog/images/htpdate_test.png and b/docs/images/hedgehog/images/htpdate_test.png differ diff --git a/docs/images/hedgehog/images/malcolm_arkime_reachback_acl.png b/docs/images/hedgehog/images/malcolm_arkime_reachback_acl.png index 20bb80135..fad00c3ed 100644 Binary files a/docs/images/hedgehog/images/malcolm_arkime_reachback_acl.png and b/docs/images/hedgehog/images/malcolm_arkime_reachback_acl.png differ diff --git a/docs/images/hedgehog/images/ssl_client_receive.png b/docs/images/hedgehog/images/ssl_client_receive.png index 53ad7a48f..6ec9cf702 100644 Binary files a/docs/images/hedgehog/images/ssl_client_receive.png and b/docs/images/hedgehog/images/ssl_client_receive.png differ diff --git a/docs/images/screenshots/arkime_sessions_files_browse.png b/docs/images/screenshots/arkime_sessions_files_browse.png deleted file mode 100644 index 3b281d2a5..000000000 Binary files a/docs/images/screenshots/arkime_sessions_files_browse.png and /dev/null differ diff --git a/docs/images/screenshots/arkime_sessions_files_log_dl.png b/docs/images/screenshots/arkime_sessions_files_log_dl.png index 5262b2188..31ca82e5d 100644 Binary files a/docs/images/screenshots/arkime_sessions_files_log_dl.png and b/docs/images/screenshots/arkime_sessions_files_log_dl.png differ diff --git a/docs/images/screenshots/dashboards_files_source.png b/docs/images/screenshots/dashboards_files_source.png index 14a25105a..db78a3324 100644 Binary files a/docs/images/screenshots/dashboards_files_source.png and b/docs/images/screenshots/dashboards_files_source.png differ 
diff --git a/docs/index-management.md b/docs/index-management.md index 607dcd60d..0c7110ad4 100644 --- a/docs/index-management.md +++ b/docs/index-management.md @@ -2,9 +2,8 @@ Malcolm releases prior to v6.2.0 used environment variables to configure OpenSearch [Index State Management](https://opensearch.org/docs/latest/im-plugin/ism/index/) [policies](https://opensearch.org/docs/latest/im-plugin/ism/policies/). -Since then, OpenSearch Dashboards has developed and released plugins with UIs for [Index State Management](https://opensearch.org/docs/latest/im-plugin/ism/index/) and [Snapshot Management](https://opensearch.org/docs/latest/opensearch/snapshots/sm-dashboards/). Because these plugins provide a more comprehensive and user-friendly interface for these features, the old environment variable-based configuration code has been removed from Malcolm; with the exception of the code that uses the `OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT` and `OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT` [variables in `dashboards-helper.env`](malcolm-config.md#MalcolmConfigEnvVars), which deals with deleting the oldest network session metadata indices when the database exceeds a certain size. +Since then, OpenSearch Dashboards has developed and released plugins with UIs for [Index State Management](https://opensearch.org/docs/latest/im-plugin/ism/index/) and [Snapshot Management](https://opensearch.org/docs/latest/opensearch/snapshots/sm-dashboards/). Because these plugins provide a more comprehensive and user-friendly interface for these features, the old environment variable-based configuration code has been removed from Malcolm, with a few exceptions. See [**Managing disk usage**](malcolm-config.md#DiskUsage) for more information. -Note that OpenSearch index state management and snapshot management only deals with disk space consumed by OpenSearch indices: it does not have anything to do with PCAP file storage. The `MANAGE_PCAP_FILES` environment variable in the [`arkime.env` file](malcolm-config.md#MalcolmConfigEnvVars) can be used to allow Arkime to prune old PCAP files based on available disk space. 
# Using ILM/ISM with Arkime diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 3856f5966..4ff314dcc 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -272,28 +272,28 @@ agent2 | agent2 | 192.168.56.12 | agent2 | k3s | 6000m | agent1 | agent1 | 192.168.56.11 | agent1 | k3s | 6000m | 861.34m | 14.36% | 19.55Gi | 9.29Gi | 61.28Gi | 11 | Pod Name | State | Pod IP | Pod Kind | Worker Node | CPU Usage | Memory Usage | Container Name:Restarts | Container Image | -api-deployment-6f4686cf59-bn286 | Running | 10.42.2.14 | ReplicaSet | agent1 | 0.11m | 59.62Mi | api-container:0 | api:24.03.1 | -file-monitor-deployment-855646bd75-vk7st | Running | 10.42.2.16 | ReplicaSet | agent1 | 8.47m | 1.46Gi | file-monitor-container:0 | file-monitor:24.03.1 | -zeek-live-deployment-64b69d4b6f-947vr | Running | 10.42.2.17 | ReplicaSet | agent1 | 0.02m | 12.44Mi | zeek-live-container:0 | zeek:24.03.1 | -dashboards-helper-deployment-69dc54f6b6-ln4sq | Running | 10.42.2.15 | ReplicaSet | agent1 | 10.77m | 38.43Mi | dashboards-helper-container:0 | dashboards-helper:24.03.1 | -upload-deployment-586568844b-4jnk9 | Running | 10.42.2.18 | ReplicaSet | agent1 | 0.15m | 29.78Mi | upload-container:0 | file-upload:24.03.1 | -filebeat-deployment-6ff8bc444f-t7h49 | Running | 10.42.2.20 | ReplicaSet | agent1 | 2.84m | 70.71Mi | filebeat-container:0 | filebeat-oss:24.03.1 | -zeek-offline-deployment-844f4865bd-g2sdm | Running | 10.42.2.21 | ReplicaSet | agent1 | 0.17m | 41.92Mi | zeek-offline-container:0 | zeek:24.03.1 | -logstash-deployment-6fbc9fdcd5-hwx8s | Running | 10.42.2.22 | ReplicaSet | agent1 | 85.55m | 2.91Gi | logstash-container:0 | logstash-oss:24.03.1 | -netbox-deployment-cdcff4977-hbbw5 | Running | 10.42.2.23 | ReplicaSet | agent1 | 807.64m | 702.86Mi | netbox-container:0 | netbox:24.03.1 | -suricata-offline-deployment-6ccdb89478-z5696 | Running | 10.42.2.19 | ReplicaSet | agent1 | 0.22m | 34.88Mi | suricata-offline-container:0 | suricata:24.03.1 | -dashboards-deployment-69b5465db-vz88g | Running | 10.42.1.14 | ReplicaSet | agent2 | 0.94m | 100.12Mi | dashboards-container:0 | dashboards:24.03.1 | -netbox-redis-cache-deployment-5f77d47b8b-z7t2z | Running | 10.42.1.15 | ReplicaSet | agent2 | 3.57m | 7.36Mi | netbox-redis-cache-container:0 | redis:24.03.1 | -suricata-live-deployment-6494c77759-9rlnt | Running | 10.42.1.16 | ReplicaSet | agent2 | 0.02m | 9.69Mi | suricata-live-container:0 | suricata:24.03.1 | -freq-deployment-cfd84fd97-dnngf | Running | 10.42.1.17 | ReplicaSet | agent2 | 0.2m | 26.36Mi | freq-container:0 | freq:24.03.1 | -arkime-deployment-56999cdd66-s98pp | Running | 10.42.1.18 | ReplicaSet | agent2 | 4.15m | 113.07Mi | arkime-container:0 | arkime:24.03.1 | -pcap-monitor-deployment-594ff674c4-fsm7m | Running | 10.42.1.19 | ReplicaSet | agent2 | 1.24m | 48.44Mi | pcap-monitor-container:0 | pcap-monitor:24.03.1 | -pcap-capture-deployment-7c8bf6957-jzpzn | Running | 10.42.1.20 | ReplicaSet | agent2 | 0.02m | 9.64Mi | pcap-capture-container:0 | pcap-capture:24.03.1 | -netbox-postgres-deployment-5879b8dffc-kkt56 | Running | 10.42.1.21 | ReplicaSet | agent2 | 70.91m | 33.02Mi | netbox-postgres-container:0 | postgresql:24.03.1 | -htadmin-deployment-6fc46888b9-sq6ln | Running | 10.42.1.23 | ReplicaSet | agent2 | 0.14m | 30.53Mi | htadmin-container:0 | htadmin:24.03.1 | -netbox-redis-deployment-5bcd8f6c96-j5xpf | Running | 10.42.1.24 | ReplicaSet | agent2 | 1.46m | 7.34Mi | netbox-redis-container:0 | redis:24.03.1 | -nginx-proxy-deployment-69fcc4968d-f68tq | Running | 10.42.1.22 | ReplicaSet | 
agent2 | 0.31m | 22.63Mi | nginx-proxy-container:0 | nginx-proxy:24.03.1 | -opensearch-deployment-75498799f6-4zmwd | Running | 10.42.1.25 | ReplicaSet | agent2 | 89.8m | 11.03Gi | opensearch-container:0 | opensearch:24.03.1 | +api-deployment-6f4686cf59-bn286 | Running | 10.42.2.14 | ReplicaSet | agent1 | 0.11m | 59.62Mi | api-container:0 | api:24.04.0 | +file-monitor-deployment-855646bd75-vk7st | Running | 10.42.2.16 | ReplicaSet | agent1 | 8.47m | 1.46Gi | file-monitor-container:0 | file-monitor:24.04.0 | +zeek-live-deployment-64b69d4b6f-947vr | Running | 10.42.2.17 | ReplicaSet | agent1 | 0.02m | 12.44Mi | zeek-live-container:0 | zeek:24.04.0 | +dashboards-helper-deployment-69dc54f6b6-ln4sq | Running | 10.42.2.15 | ReplicaSet | agent1 | 10.77m | 38.43Mi | dashboards-helper-container:0 | dashboards-helper:24.04.0 | +upload-deployment-586568844b-4jnk9 | Running | 10.42.2.18 | ReplicaSet | agent1 | 0.15m | 29.78Mi | upload-container:0 | file-upload:24.04.0 | +filebeat-deployment-6ff8bc444f-t7h49 | Running | 10.42.2.20 | ReplicaSet | agent1 | 2.84m | 70.71Mi | filebeat-container:0 | filebeat-oss:24.04.0 | +zeek-offline-deployment-844f4865bd-g2sdm | Running | 10.42.2.21 | ReplicaSet | agent1 | 0.17m | 41.92Mi | zeek-offline-container:0 | zeek:24.04.0 | +logstash-deployment-6fbc9fdcd5-hwx8s | Running | 10.42.2.22 | ReplicaSet | agent1 | 85.55m | 2.91Gi | logstash-container:0 | logstash-oss:24.04.0 | +netbox-deployment-cdcff4977-hbbw5 | Running | 10.42.2.23 | ReplicaSet | agent1 | 807.64m | 702.86Mi | netbox-container:0 | netbox:24.04.0 | +suricata-offline-deployment-6ccdb89478-z5696 | Running | 10.42.2.19 | ReplicaSet | agent1 | 0.22m | 34.88Mi | suricata-offline-container:0 | suricata:24.04.0 | +dashboards-deployment-69b5465db-vz88g | Running | 10.42.1.14 | ReplicaSet | agent2 | 0.94m | 100.12Mi | dashboards-container:0 | dashboards:24.04.0 | +netbox-redis-cache-deployment-5f77d47b8b-z7t2z | Running | 10.42.1.15 | ReplicaSet | agent2 | 3.57m | 7.36Mi | netbox-redis-cache-container:0 | redis:24.04.0 | +suricata-live-deployment-6494c77759-9rlnt | Running | 10.42.1.16 | ReplicaSet | agent2 | 0.02m | 9.69Mi | suricata-live-container:0 | suricata:24.04.0 | +freq-deployment-cfd84fd97-dnngf | Running | 10.42.1.17 | ReplicaSet | agent2 | 0.2m | 26.36Mi | freq-container:0 | freq:24.04.0 | +arkime-deployment-56999cdd66-s98pp | Running | 10.42.1.18 | ReplicaSet | agent2 | 4.15m | 113.07Mi | arkime-container:0 | arkime:24.04.0 | +pcap-monitor-deployment-594ff674c4-fsm7m | Running | 10.42.1.19 | ReplicaSet | agent2 | 1.24m | 48.44Mi | pcap-monitor-container:0 | pcap-monitor:24.04.0 | +pcap-capture-deployment-7c8bf6957-jzpzn | Running | 10.42.1.20 | ReplicaSet | agent2 | 0.02m | 9.64Mi | pcap-capture-container:0 | pcap-capture:24.04.0 | +netbox-postgres-deployment-5879b8dffc-kkt56 | Running | 10.42.1.21 | ReplicaSet | agent2 | 70.91m | 33.02Mi | netbox-postgres-container:0 | postgresql:24.04.0 | +htadmin-deployment-6fc46888b9-sq6ln | Running | 10.42.1.23 | ReplicaSet | agent2 | 0.14m | 30.53Mi | htadmin-container:0 | htadmin:24.04.0 | +netbox-redis-deployment-5bcd8f6c96-j5xpf | Running | 10.42.1.24 | ReplicaSet | agent2 | 1.46m | 7.34Mi | netbox-redis-container:0 | redis:24.04.0 | +nginx-proxy-deployment-69fcc4968d-f68tq | Running | 10.42.1.22 | ReplicaSet | agent2 | 0.31m | 22.63Mi | nginx-proxy-container:0 | nginx-proxy:24.04.0 | +opensearch-deployment-75498799f6-4zmwd | Running | 10.42.1.25 | ReplicaSet | agent2 | 89.8m | 11.03Gi | opensearch-container:0 | opensearch:24.04.0 | ``` The other control 
scripts (`stop`, `restart`, `logs`, etc.) work in a similar manner as in a Docker-based deployment. One notable difference is the `wipe` script: data on PersistentVolume storage cannot be deleted by `wipe`. It must be deleted manually on the storage media underlying the PersistentVolumes. @@ -369,7 +369,7 @@ Select authentication method (Basic): 1 Enable index management policies (ILM/ISM) in Arkime? (y / N): n -Should Malcolm delete the oldest database indices and/or PCAP files based on available storage? (y / N): y +Should Malcolm delete the oldest database indices and capture artifacts based on available storage? (y / N): y Delete the oldest indices when the database exceeds a certain size? (y / N): y @@ -411,6 +411,8 @@ Select file extraction behavior (none): 5 3: none Select file preservation behavior (quarantined): 1 +Enter maximum allowed space for Zeek-extracted files (e.g., 250GB) or file system fill threshold (e.g., 90%): 100G + Expose web interface for downloading preserved files? (y / N): y ZIP downloaded preserved files? (y / N): y @@ -553,28 +555,28 @@ agent1 | agent1 | 192.168.56.11 | agent1 | k3s | 6000m | agent2 | agent2 | 192.168.56.12 | agent2 | k3s | 6000m | 552.71m | 9.21% | 19.55Gi | 13.27Gi | 61.28Gi | 12 | Pod Name | State | Pod IP | Pod Kind | Worker Node | CPU Usage | Memory Usage | Container Name:Restarts | Container Image | -netbox-redis-cache-deployment-5f77d47b8b-jr9nt | Running | 10.42.2.6 | ReplicaSet | agent2 | 1.89m | 7.24Mi | netbox-redis-cache-container:0 | redis:24.03.1 | -netbox-redis-deployment-5bcd8f6c96-bkzmh | Running | 10.42.2.5 | ReplicaSet | agent2 | 1.62m | 7.52Mi | netbox-redis-container:0 | redis:24.03.1 | -dashboards-helper-deployment-69dc54f6b6-ks7ps | Running | 10.42.2.4 | ReplicaSet | agent2 | 12.95m | 40.75Mi | dashboards-helper-container:0 | dashboards-helper:24.03.1 | -freq-deployment-cfd84fd97-5bwp6 | Running | 10.42.2.8 | ReplicaSet | agent2 | 0.11m | 26.33Mi | freq-container:0 | freq:24.03.1 | -pcap-capture-deployment-7c8bf6957-hkvkn | Running | 10.42.2.12 | ReplicaSet | agent2 | 0.02m | 9.21Mi | pcap-capture-container:0 | pcap-capture:24.03.1 | -nginx-proxy-deployment-69fcc4968d-m57rz | Running | 10.42.2.10 | ReplicaSet | agent2 | 0.91m | 22.72Mi | nginx-proxy-container:0 | nginx-proxy:24.03.1 | -htadmin-deployment-6fc46888b9-vpt7l | Running | 10.42.2.7 | ReplicaSet | agent2 | 0.16m | 30.21Mi | htadmin-container:0 | htadmin:24.03.1 | -opensearch-deployment-75498799f6-5v92w | Running | 10.42.2.13 | ReplicaSet | agent2 | 139.2m | 10.86Gi | opensearch-container:0 | opensearch:24.03.1 | -zeek-live-deployment-64b69d4b6f-fcb6n | Running | 10.42.2.9 | ReplicaSet | agent2 | 0.02m | 109.55Mi | zeek-live-container:0 | zeek:24.03.1 | -dashboards-deployment-69b5465db-kgsqk | Running | 10.42.2.3 | ReplicaSet | agent2 | 14.98m | 108.85Mi | dashboards-container:0 | dashboards:24.03.1 | -arkime-deployment-56999cdd66-xxpw9 | Running | 10.42.2.11 | ReplicaSet | agent2 | 208.95m | 78.42Mi | arkime-container:0 | arkime:24.03.1 | -api-deployment-6f4686cf59-xt9md | Running | 10.42.1.3 | ReplicaSet | agent1 | 0.14m | 56.88Mi | api-container:0 | api:24.03.1 | -netbox-postgres-deployment-5879b8dffc-lb4qm | Running | 10.42.1.6 | ReplicaSet | agent1 | 141.2m | 48.02Mi | netbox-postgres-container:0 | postgresql:24.03.1 | -pcap-monitor-deployment-594ff674c4-fwq7g | Running | 10.42.1.12 | ReplicaSet | agent1 | 3.93m | 46.44Mi | pcap-monitor-container:0 | pcap-monitor:24.03.1 | -suricata-offline-deployment-6ccdb89478-j5fgj | Running | 10.42.1.10 | ReplicaSet | 
agent1 | 10.42m | 35.12Mi | suricata-offline-container:0 | suricata:24.03.1 | -suricata-live-deployment-6494c77759-rpt48 | Running | 10.42.1.8 | ReplicaSet | agent1 | 0.01m | 9.62Mi | suricata-live-container:0 | suricata:24.03.1 | -netbox-deployment-cdcff4977-7ns2q | Running | 10.42.1.7 | ReplicaSet | agent1 | 830.47m | 530.7Mi | netbox-container:0 | netbox:24.03.1 | -zeek-offline-deployment-844f4865bd-7x68b | Running | 10.42.1.9 | ReplicaSet | agent1 | 1.44m | 43.66Mi | zeek-offline-container:0 | zeek:24.03.1 | -filebeat-deployment-6ff8bc444f-pdgzj | Running | 10.42.1.11 | ReplicaSet | agent1 | 0.78m | 75.25Mi | filebeat-container:0 | filebeat-oss:24.03.1 | -file-monitor-deployment-855646bd75-nbngq | Running | 10.42.1.4 | ReplicaSet | agent1 | 1.69m | 1.46Gi | file-monitor-container:0 | file-monitor:24.03.1 | -upload-deployment-586568844b-9s7f5 | Running | 10.42.1.13 | ReplicaSet | agent1 | 0.14m | 29.62Mi | upload-container:0 | file-upload:24.03.1 | -logstash-deployment-6fbc9fdcd5-2hhx8 | Running | 10.42.1.5 | ReplicaSet | agent1 | 3236.29m | 357.36Mi | logstash-container:0 | logstash-oss:24.03.1 | +netbox-redis-cache-deployment-5f77d47b8b-jr9nt | Running | 10.42.2.6 | ReplicaSet | agent2 | 1.89m | 7.24Mi | netbox-redis-cache-container:0 | redis:24.04.0 | +netbox-redis-deployment-5bcd8f6c96-bkzmh | Running | 10.42.2.5 | ReplicaSet | agent2 | 1.62m | 7.52Mi | netbox-redis-container:0 | redis:24.04.0 | +dashboards-helper-deployment-69dc54f6b6-ks7ps | Running | 10.42.2.4 | ReplicaSet | agent2 | 12.95m | 40.75Mi | dashboards-helper-container:0 | dashboards-helper:24.04.0 | +freq-deployment-cfd84fd97-5bwp6 | Running | 10.42.2.8 | ReplicaSet | agent2 | 0.11m | 26.33Mi | freq-container:0 | freq:24.04.0 | +pcap-capture-deployment-7c8bf6957-hkvkn | Running | 10.42.2.12 | ReplicaSet | agent2 | 0.02m | 9.21Mi | pcap-capture-container:0 | pcap-capture:24.04.0 | +nginx-proxy-deployment-69fcc4968d-m57rz | Running | 10.42.2.10 | ReplicaSet | agent2 | 0.91m | 22.72Mi | nginx-proxy-container:0 | nginx-proxy:24.04.0 | +htadmin-deployment-6fc46888b9-vpt7l | Running | 10.42.2.7 | ReplicaSet | agent2 | 0.16m | 30.21Mi | htadmin-container:0 | htadmin:24.04.0 | +opensearch-deployment-75498799f6-5v92w | Running | 10.42.2.13 | ReplicaSet | agent2 | 139.2m | 10.86Gi | opensearch-container:0 | opensearch:24.04.0 | +zeek-live-deployment-64b69d4b6f-fcb6n | Running | 10.42.2.9 | ReplicaSet | agent2 | 0.02m | 109.55Mi | zeek-live-container:0 | zeek:24.04.0 | +dashboards-deployment-69b5465db-kgsqk | Running | 10.42.2.3 | ReplicaSet | agent2 | 14.98m | 108.85Mi | dashboards-container:0 | dashboards:24.04.0 | +arkime-deployment-56999cdd66-xxpw9 | Running | 10.42.2.11 | ReplicaSet | agent2 | 208.95m | 78.42Mi | arkime-container:0 | arkime:24.04.0 | +api-deployment-6f4686cf59-xt9md | Running | 10.42.1.3 | ReplicaSet | agent1 | 0.14m | 56.88Mi | api-container:0 | api:24.04.0 | +netbox-postgres-deployment-5879b8dffc-lb4qm | Running | 10.42.1.6 | ReplicaSet | agent1 | 141.2m | 48.02Mi | netbox-postgres-container:0 | postgresql:24.04.0 | +pcap-monitor-deployment-594ff674c4-fwq7g | Running | 10.42.1.12 | ReplicaSet | agent1 | 3.93m | 46.44Mi | pcap-monitor-container:0 | pcap-monitor:24.04.0 | +suricata-offline-deployment-6ccdb89478-j5fgj | Running | 10.42.1.10 | ReplicaSet | agent1 | 10.42m | 35.12Mi | suricata-offline-container:0 | suricata:24.04.0 | +suricata-live-deployment-6494c77759-rpt48 | Running | 10.42.1.8 | ReplicaSet | agent1 | 0.01m | 9.62Mi | suricata-live-container:0 | suricata:24.04.0 | 
+netbox-deployment-cdcff4977-7ns2q | Running | 10.42.1.7 | ReplicaSet | agent1 | 830.47m | 530.7Mi | netbox-container:0 | netbox:24.04.0 | +zeek-offline-deployment-844f4865bd-7x68b | Running | 10.42.1.9 | ReplicaSet | agent1 | 1.44m | 43.66Mi | zeek-offline-container:0 | zeek:24.04.0 | +filebeat-deployment-6ff8bc444f-pdgzj | Running | 10.42.1.11 | ReplicaSet | agent1 | 0.78m | 75.25Mi | filebeat-container:0 | filebeat-oss:24.04.0 | +file-monitor-deployment-855646bd75-nbngq | Running | 10.42.1.4 | ReplicaSet | agent1 | 1.69m | 1.46Gi | file-monitor-container:0 | file-monitor:24.04.0 | +upload-deployment-586568844b-9s7f5 | Running | 10.42.1.13 | ReplicaSet | agent1 | 0.14m | 29.62Mi | upload-container:0 | file-upload:24.04.0 | +logstash-deployment-6fbc9fdcd5-2hhx8 | Running | 10.42.1.5 | ReplicaSet | agent1 | 3236.29m | 357.36Mi | logstash-container:0 | logstash-oss:24.04.0 | ``` View container logs for the Malcolm deployment with `./scripts/logs` (if **[stern](https://github.com/stern/stern)** present in `$PATH`): diff --git a/docs/malcolm-config.md b/docs/malcolm-config.md index 22ad45283..582865e76 100644 --- a/docs/malcolm-config.md +++ b/docs/malcolm-config.md @@ -12,7 +12,8 @@ Although the configuration script automates many of the following configuration - `ARKIME_AUTO_ANALYZE_PCAP_THREADS` – the number of threads available to Arkime for analyzing PCAP files (default `1`) - `ARKIME_PASSWORD_SECRET` - the password hash secret for the Arkime viewer cluster (see `passwordSecret` in [Arkime INI Settings](https://arkime.com/settings)) used to secure the connection used when Arkime viewer retrieves a PCAP payload for display in its user interface - `ARKIME_ROTATE_INDEX` - how often (based on network traffic timestamp) to [create a new index](https://arkime.com/settings#rotateIndex) in OpenSearch - - `MANAGE_PCAP_FILES` – if set to `true`, all PCAP files imported into Malcolm will be marked as available for deletion by Arkime if available storage space becomes too low (default `false`) + - `ARKIME_QUERY_ALL_INDICES` - whether or not Arkime should [query all indices](https://arkime.com/settings#queryAllIndices) instead of trying to calculate which ones pertain to the search time frame (default `false`) + - `MANAGE_PCAP_FILES` and `ARKIME_FREESPACEG` - these variables deal with PCAP [deletion by Arkime](https://arkime.com/faq#pcap-deletion), see [**Managing disk usage**](#DiskUsage) below - `MAXMIND_GEOIP_DB_LICENSE_KEY` - Malcolm uses MaxMind's free GeoLite2 databases for GeoIP lookups. As of December 30, 2019, these databases are [no longer available](https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/) for download via a public URL. Instead, they must be downloaded using a MaxMind license key (available without charge [from MaxMind](https://www.maxmind.com/en/geolite2/signup)). The license key can be specified here for GeoIP database downloads during build- and run-time. 
- The following variables configure [Arkime's use](index-management.md#ArkimeIndexPolicies) of OpenSearch [Index State Management (ISM)](https://opensearch.org/docs/latest/im-plugin/ism/index/) or Elasticsearch [Index Lifecycle Management (ILM)](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-lifecycle-management.html): + `INDEX_MANAGEMENT_ENABLED` - if set to `true`, Malcolm's instance of Arkime will [use these features](https://arkime.com/faq#ilm) when indexing data @@ -30,8 +31,11 @@ Although the configuration script automates many of the following configuration - `LOGSTASH_HOST` – the host and port at which Beats-based forwarders will connect to Logstash (default `logstash:5044`); see `MALCOLM_PROFILE` below * **`dashboards.env`** and **`dashboards-helper.env`** - settings for the containers that configure and maintain [OpenSearch](https://opensearch.org/) and [OpenSearch Dashboards](https://opensearch.org/docs/latest/dashboards/index/) - `DASHBOARDS_URL` - used primarily when `OPENSEARCH_PRIMARY` is set to `elasticsearch-remote` (see [OpenSearch and Elasticsearch instances](opensearch-instances.md#OpenSearchInstance)), this variable stores the URL for the [Kibana](https://www.elastic.co/kibana) instance into which Malcolm's dashboard's and index templates will be imported + - `DASHBOARDS_PREFIX` – a string to prepend to the titles of Malcolm's prebuilt [dashboards](dashboards.md#PrebuiltVisualizations) upon import during Malcolm's initialization (default is an empty string) - `DASHBOARDS_DARKMODE` – if set to `true`, [OpenSearch Dashboards](dashboards.md#DashboardsVisualizations) will be set to dark mode upon initialization (default `true`) + - `OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT` - the maximum cumulative size OpenSearch indices are allowed to consume before the oldest indices are deleted, see [**Managing disk usage**](#DiskUsage) below * **`filebeat.env`** - settings specific to [Filebeat](https://www.elastic.co/products/beats/filebeat), particularly for how Filebeat watches for new log files to parse and how it receives and stores [third-Party logs](third-party-logs.md#ThirdPartyLogs) + - `LOG_CLEANUP_MINUTES` and `ZIP_CLEANUP_MINUTES` - these variables deal with cleaning up already-processed log files, see [**Managing disk usage**](#DiskUsage) below * **`logstash.env`** - settings specific to [Logstash](https://www.elastic.co/products/logstash) - `LOGSTASH_OUI_LOOKUP` – if set to `true`, Logstash will map MAC addresses to vendors for all source and destination MAC addresses when analyzing Zeek logs (default `true`) - `LOGSTASH_REVERSE_DNS` – if set to `true`, Logstash will perform a reverse DNS lookup for all external source and destination IP address values when analyzing Zeek logs (default `false`) @@ -106,7 +110,7 @@ Although the configuration script automates many of the following configuration - `EXTRACTED_FILE_HTTP_SERVER_KEY` – specifies the password for the ZIP archive if `EXTRACTED_FILE_HTTP_SERVER_ZIP` is `true`; otherwise, this specifies the decryption password for encrypted Zeek-extracted files in an `openssl enc`-compatible format (e.g., `openssl enc -aes-256-cbc -d -in example.exe.encrypted -out example.exe`) - `EXTRACTED_FILE_IGNORE_EXISTING` – if set to `true`, files extant in `./zeek-logs/extract_files/` directory will be ignored on startup rather than scanned - `EXTRACTED_FILE_PRESERVATION` – determines behavior for preservation of [Zeek-extracted files](file-scanning.md#ZeekFileExtraction) - - `EXTRACTED_FILE_UPDATE_RULES` – if set to
`true`, file scanner engines (e.g., ClamAV, Capa, Yara) will periodically update their rule definitions (default `false`) + - `EXTRACTED_FILE_UPDATE_RULES` – if set to `true`, file scanner engines (e.g., ClamAV, Capa, Yara) will periodically update their rule definitions (default `false`) - `EXTRACTED_FILE_YARA_CUSTOM_ONLY` – if set to `true`, Malcolm will bypass the default Yara rulesets ([Neo23x0/signature-base](https://github.com/Neo23x0/signature-base), [reversinglabs/reversinglabs-yara-rules](https://github.com/reversinglabs/reversinglabs-yara-rules), and [bartblaze/Yara-rules](https://github.com/bartblaze/Yara-rules)) and use only [user-defined rules](custom-rules.md#YARA) in `./yara/rules` - `VTOT_API2_KEY` – used to specify a [VirusTotal Public API v.20](https://www.virustotal.com/en/documentation/public-api/) key, which, if specified, will be used to submit hashes of [Zeek-extracted files](file-scanning.md#ZeekFileExtraction) to VirusTotal - `ZEEK_AUTO_ANALYZE_PCAP_FILES` – if set to `true`, all PCAP files imported into Malcolm will automatically be analyzed by Zeek, and the resulting logs will also be imported (default `false`) @@ -123,6 +127,7 @@ Although the configuration script automates many of the following configuration - `ZEEK_LIVE_CAPTURE` - if set to `true`, Zeek will monitor live traffic on the local interface(s) defined by `PCAP_FILTER` - `ZEEK_LOCAL_NETS` - specifies the value for Zeek's [`Site::local_nets`](https://docs.zeek.org/en/master/scripts/base/utils/site.zeek.html#id-Site::local_nets) variable (and `networks.cfg` for live capture) (e.g., `1.2.3.0/24,5.6.7.0/24`); note that by default, Zeek considers IANA-registered private address space such as `10.0.0.0/8` and `192.168.0.0/16` site-local - `ZEEK_ROTATED_PCAP` - if set to `true`, Zeek can analyze PCAP files captured by `netsniff-ng` or `tcpdump` (see `PCAP_ENABLE_NETSNIFF` and `PCAP_ENABLE_TCPDUMP`, as well as `ZEEK_AUTO_ANALYZE_PCAP_FILES`); if `ZEEK_LIVE_CAPTURE` is `true`, this should be `false`; otherwise Zeek will see duplicate traffic + - See [**Managing disk usage**](#DiskUsage) below for a discussion of the variables that control automatic threshold-based deletion of the oldest [Zeek-extracted files](file-scanning.md#ZeekFileExtraction). ## Command-line arguments @@ -146,4 +151,22 @@ options: Note that the value for **any** argument not specified on the command line will be reset to its default (as if for a new Malcolm installation) regardless of the setting's current value in the corresponding `.env` file. In other words, users who want to use the `--defaults` option should carefully review all available command-line options and choose all that apply. -Similarly, [authentication](authsetup.md#AuthSetup)-related settings can also be set noninteractively by using the [command-line arguments](authsetup.md#CommandLineConfig) for `./scripts/auth_setup`. \ No newline at end of file +Similarly, [authentication](authsetup.md#AuthSetup)-related settings can also be set noninteractively by using the [command-line arguments](authsetup.md#CommandLineConfig) for `./scripts/auth_setup`. + +## Managing disk usage + +In instances where Malcolm is deployed with the intention of running indefinitely, eventually the question arises of what to do when the file systems used for storing Malcolm's artifacts (e.g., PCAP files, raw logs, [OpenSearch indices](index-management.md), [extracted files](file-scanning.md#ZeekFileExtraction), etc.) begin to fill up.
Malcolm provides [options](#MalcolmConfigEnvVars) for tuning the "aging out" (deletion) of old artifacts to make room for newer data. + +* PCAP deletion is configured by environment variables in **`arkime.env`**: + - `MANAGE_PCAP_FILES` – if set to `true`, all PCAP files imported into Malcolm will be marked as available for [deletion by Arkime](https://arkime.com/faq#pcap-deletion) if available storage space becomes too low (default `false`) + - `ARKIME_FREESPACEG` - when `MANAGE_PCAP_FILES` is `true`, this value is [used by Arkime](https://arkime.com/settings#freespaceg) to determine when to delete the oldest PCAP files. Note that this variable represents the amount of free/unused/available space desired on the file system: e.g., a value of `5%` means "delete PCAP files if the amount of unused storage on the file system falls below 5%" (default `10%`). +* Zeek logs and Suricata logs are temporarily stored on disk as they are parsed, enriched, and indexed, and afterwards are periodically [pruned]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/filebeat/scripts/clean-processed-folder.py) from the file system as they age, based on these variables in **`filebeat.env`**: + - `LOG_CLEANUP_MINUTES` - specifies the age, in minutes, at which already-processed log files should be deleted + - `ZIP_CLEANUP_MINUTES` - specifies the age, in minutes, at which the compressed archives containing already-processed log files should be deleted +* Files [extracted by Zeek](file-scanning.md#ZeekFileExtraction) stored in the `./zeek-logs/extract_files/` directory can be periodically [pruned]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/prune_files.sh) based on the following variables in **`zeek.env`**. If either of the two threshold limits defined here is met, the oldest extracted files will be deleted until the limit is no longer met. Setting either of the threshold limits to `0` disables that check. A simplified illustration of these checks appears at the end of this section. + - `EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE` - specifies the maximum size, either in gigabytes or as a human-readable data size (e.g., `250G`), that the `./zeek-logs/extract_files/` directory is allowed to contain before the prune condition triggers + - `EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT` - specifies a maximum fill percentage for the file system containing the `./zeek-logs/extract_files/` directory; in other words, if the disk is more than this percentage utilized, the prune condition triggers + - `EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS` - the interval between checking the prune conditions, in seconds (default `300`) +* [Index management policies](index-management.md) can be handled via plugins provided as part of the OpenSearch and Elasticsearch platforms, respectively. In addition to those tools, the `OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT` variable in **`dashboards-helper.env`** defines a maximum cumulative size that OpenSearch indices are allowed to consume before the oldest indices [are deleted]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/opensearch_index_size_prune.py), specified either as a human-readable data size (e.g., `250G`) or as a percentage of the total disk size (e.g., `70%`): e.g., a value of `500G` means "delete the oldest OpenSearch indices if the total space consumed by Malcolm's indices exceeds five hundred gigabytes." + +Similar settings exist for managing disk usage on [Hedgehog Linux](malcolm-hedgehog-e2e-iso-install.md#HedgehogDiskUsage).
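The following is a minimal, illustrative sketch of the two prune conditions described above for Zeek-extracted files; it is not the actual `prune_files.sh` implementation, and the directory path and threshold values shown are example assumptions only:

```
#!/usr/bin/env bash
# Illustrative only: approximate the two prune conditions for ./zeek-logs/extract_files/.
# The values below stand in for EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE (expressed in KB)
# and EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT; a value of 0 skips that check.
EXTRACT_DIR="./zeek-logs/extract_files"
MAX_SIZE_KB=$((250 * 1024 * 1024))   # e.g., 250G expressed in kilobytes
MAX_DISK_FILL_PCT=90                 # e.g., 90% file system utilization

# condition 1: cumulative size of the extracted files directory
dir_kb=$(du -sk "$EXTRACT_DIR" | cut -f1)

# condition 2: fill percentage of the file system containing the directory
fill_pct=$(df --output=pcent "$EXTRACT_DIR" | tail -n 1 | tr -dc '0-9')

if { [ "$MAX_SIZE_KB" -gt 0 ] && [ "$dir_kb" -gt "$MAX_SIZE_KB" ]; } || \
   { [ "$MAX_DISK_FILL_PCT" -gt 0 ] && [ "$fill_pct" -gt "$MAX_DISK_FILL_PCT" ]; }; then
  # the real prune process would now delete the oldest extracted files until neither condition is met
  echo "prune condition triggered: ${dir_kb} KB used, file system ${fill_pct}% full"
fi
```

In a running deployment this check is repeated every `EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS` seconds rather than invoked by hand.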
\ No newline at end of file diff --git a/docs/malcolm-hedgehog-e2e-iso-install.md b/docs/malcolm-hedgehog-e2e-iso-install.md index 69131bbbe..4a4d3feea 100644 --- a/docs/malcolm-hedgehog-e2e-iso-install.md +++ b/docs/malcolm-hedgehog-e2e-iso-install.md @@ -29,7 +29,9 @@ In contrast to using the ISO installer, Malcolm can also be installed "natively" * [ssl-client-receive](#HedgehogGetCerts): Receive client SSL files for filebeat from Malcolm * [filebeat](#Hedgehogfilebeat): Zeek and Suricata log forwarding * [miscbeat](#Hedgehogmiscbeat): System metrics forwarding - + [Autostart services](#HedgehogConfigAutostart) + * [acl-configure](#HedgehogACL): Configure ACL for artifact reachback from Malcolm + - [Autostart services](#HedgehogConfigAutostart) + - [Managing disk usage](#HedgehogDiskUsage) * [Verifying Traffic Capture and Forwarding](#Verify) ## Obtaining the Installation ISOs @@ -185,8 +187,8 @@ The [configuration and tuning](malcolm-config.md#ConfigAndTuning) wizard's quest - This defines how many additional copies of older session indices Arkime should store. - **How many weeks of history should Arkime keep?",** - This defines the retention period (in weeks) for `arkime-history` indices. -* **Should Malcolm delete the oldest database indices and/or PCAP files based on available storage?** - - Choose **Y** to proceed to the following related questions about managing the data storage used by Malcolm. +* **Should Malcolm delete the oldest database indices and capture artifacts based on available storage?** + - Choose **Y** to proceed to the following related questions about [managing the data storage](malcolm-config.md#DiskUsage) used by Malcolm. - **Delete the oldest indices when the database exceeds a certain size?** - Most of the configuration around OpenSearch [Index State Management](https://opensearch.org/docs/latest/im-plugin/ism/index/) and [Snapshot Management](https://opensearch.org/docs/latest/opensearch/snapshots/sm-dashboards/) can be done in OpenSearch Dashboards. In addition to (or instead of) the OpenSearch index state management operations, Malcolm can also be configured to delete the oldest network session metadata indices when the database exceeds a certain size to prevent filling up all available storage with OpenSearch indices. - **Should Arkime delete uploaded PCAP files based on available storage?** @@ -246,6 +248,8 @@ The [configuration and tuning](malcolm-config.md#ConfigAndTuning) wizard's quest + `quarantined`: preserve only flagged files in `./zeek-logs/extract_files/quarantine` + `all`: preserve flagged files in `./zeek-logs/extract_files/quarantine` and all other extracted files in `./zeek-logs/extract_files/preserved` + `none`: preserve no extracted files +* **Enter maximum allowed space for Zeek-extracted files (e.g., 250GB) or file system fill threshold (e.g., 90%)** + - Files [extracted by Zeek](file-scanning.md#ZeekFileExtraction) can be periodically pruned to ensure the disk storage they consume does not exceed a user-specified threshold. See the documentation on [managing Malcolm's disk usage](malcolm-config.md#DiskUsage) for more information. * **Expose web interface for downloading preserved files?** - Answering **Y** enables access to the Zeek-extracted files path through the means of a simple HTTPS directory server at **https:///extracted-files/**. Beware that Zeek-extracted files may contain malware. 
* **ZIP downloaded preserved files?** @@ -287,7 +291,7 @@ The [configuration and tuning](malcolm-config.md#ConfigAndTuning) wizard's quest - **Should Malcolm analyze live network traffic with Zeek?** - Answering **Y** will allow Malcolm itself to perform [live traffic analysis](live-analysis.md#LocalPCAP) using Zeek. Users configuring Hedgehog Linux for capture probably want to answer **N** to this question. See the question above above about "captur[ing] live network traffic." - **Capture filter (tcpdump-like filter expression; leave blank to capture all traffic)** - - If Malcolm is doing its own [live traffic analysis](live-analysis.md#LocalPCAP) as described above, users may optionally provide a capture filter. This filter will be used to limit what traffic the PCAP service ([netsniff-ng](http://netsniff-ng.org/) or [tcpdump](https://www.tcpdump.org/)) and the traffic analysis services ([Zeek](https://www.zeek.org/) and [Suricata](https://suricata.io/)) will see. Capture filters are specified using [Berkeley Packet Filter (BPF)](http://biot.com/capstats/bpf.html) syntax. For example, to indicate that Malcolm should ignore the ports it uses to communicate with Hedgehog Linux, users could specify `not port 5044 and not port 5045 and not port 8005 and not port 9200`. + - If Malcolm is doing its own [live traffic analysis](live-analysis.md#LocalPCAP) as described above, users may optionally provide a capture filter. This filter will be used to limit what traffic the PCAP service ([netsniff-ng](http://netsniff-ng.org/) or [tcpdump](https://www.tcpdump.org/)) and the traffic analysis services ([Zeek](https://www.zeek.org/) and [Suricata](https://suricata.io/)) will see. Capture filters are specified using [Berkeley Packet Filter (BPF)](http://biot.com/capstats/bpf.html) syntax. For example, to indicate that Malcolm should ignore the ports it uses to communicate with Hedgehog Linux, users could specify `not port 5044 and not port 5045 and not port 8005 and not port 8006 and not port 9200`. - **Disable capture interface hardware offloading and adjust ring buffer sizes?** - If Malcolm is doing its own [live traffic analysis](live-analysis.md#LocalPCAP) and users answer **Y** to this question, Malcolm will [use `ethtool`]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/nic-capture-setup.sh) to disable NIC hardware offloading features and adjust ring buffer sizes for capture interface(s); this should be enabled if the interface(s) are being used for capture **only**, otherwise answer **N**. If unsure, users should probably answer **N**. * **Specify capture interface(s) (comma-separated)** @@ -303,7 +307,7 @@ Here users can configure Malcolm to keep its time synchronized with either an NT ![Time synchronization method](./images/hedgehog/images/time_sync_mode.png) -If **htpdate** is selected, users will be prompted to enter the IP address or hostname and port of an HTTP/HTTPS server (for another Malcolm instance, port `9200` may be used) and the time synchronization check frequency in minutes. A test connection will be made to determine if the time can be retrieved from the server. +If **htpdate** is selected, users will be prompted to enter the URL of an HTTP/HTTPS server (for another Malcolm instance, either port `443` or port `9200` over `https` may be used) and the time synchronization check frequency in minutes. A test connection will be made to determine if the time can be retrieved from the server. 
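As a rough manual approximation of the test connection described above, one can verify that the chosen server returns a usable HTTP `Date` header over HTTPS, which is what htpdate-style synchronization relies on. The hostname and port below are placeholders, and `-k` is included only in case the instance is using self-signed certificates:

```
# Placeholder URL; substitute the Malcolm instance's address chosen during configuration.
curl -skI https://malcolm.example.org:9200 | grep -i '^date:'
```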
![*htpdate* configuration](./images/hedgehog/images/htpdate_setup.png) @@ -421,7 +425,7 @@ Returning to the configuration mode selection, choose **Time Sync**. Here users ![Time synchronization method](./images/hedgehog/images/time_sync_mode.png) -If **htpdate** is selected, users will be prompted to enter the IP address or hostname and port of an HTTP/HTTPS server (for a Malcolm instance, port `9200` may be used) and the time synchronization check frequency in minutes. A test connection will be made to determine if the time can be retrieved from the server. +If **htpdate** is selected, users will be prompted to enter the URL of an HTTP/HTTPS server (for another Malcolm instance, either port `443` or port `9200` over `https` may be used) and the time synchronization check frequency in minutes. A test connection will be made to determine if the time can be retrieved from the server. ![*htpdate* configuration](./images/hedgehog/images/htpdate_setup.png) @@ -445,7 +449,7 @@ Users will be presented with a list of network interfaces and prompted to select ![Select capture interfaces](./images/hedgehog/images/capture_iface_select.png) -Upon choosing the capture interfaces and selecting OK, users may optionally provide a capture filter. This filter will be used to limit what traffic the PCAP service ([netsniff-ng](http://netsniff-ng.org/) or [tcpdump](https://www.tcpdump.org/)) and the traffic analysis services ([`zeek`](https://www.zeek.org/) and [`suricata`](https://suricata.io/)) will see. Capture filters are specified using [Berkeley Packet Filter (BPF)](http://biot.com/capstats/bpf.html) syntax. For example, to indicate Hedgehog should ignore the ports it uses to communicate with Malcolm, users could specify `not port 5044 and not port 5045 and not port 8005 and not port 9200`. Clicking **OK** will attempt to validate the capture filter, if specified, and will present a warning if the filter is invalid. +Upon choosing the capture interfaces and selecting OK, users may optionally provide a capture filter. This filter will be used to limit what traffic the PCAP service ([netsniff-ng](http://netsniff-ng.org/) or [tcpdump](https://www.tcpdump.org/)) and the traffic analysis services ([`zeek`](https://www.zeek.org/) and [`suricata`](https://suricata.io/)) will see. Capture filters are specified using [Berkeley Packet Filter (BPF)](http://biot.com/capstats/bpf.html) syntax. For example, to indicate Hedgehog should ignore the ports it uses to communicate with Malcolm, users could specify `not port 5044 and not port 5045 and not port 8005 and not port 8006 and not port 9200`. Clicking **OK** will attempt to validate the capture filter, if specified, and will present a warning if the filter is invalid. ![Specify capture filters](./images/hedgehog/images/capture_filter.png) @@ -465,6 +469,8 @@ If unsure what mode to choose, both **mapped (except common plain text files)** Next, specify which carved files to preserve (saved on the sensor under `/capture/zeek/capture/extract_files/quarantine` by default). In order to not consume all the sensor's available storage space, the oldest preserved files will be pruned along with the oldest Zeek logs as described below with **AUTOSTART_PRUNE_ZEEK** in the [autostart services](#HedgehogConfigAutostart) section. +![File quarantine](./images/hedgehog/images/file_quarantine.png) + Users will prompted to specify which engine(s) to use to analyze extracted files. 
Extracted files can be examined through any of three methods: ![File scanners](./images/hedgehog/images/zeek_file_carve_scanners.png) @@ -476,9 +482,11 @@ Users will prompted to specify which engine(s) to use to analyze extracted files Files flagged as potentially malicious will be logged as Zeek `signatures.log` entries, and can be viewed in the **Signatures** dashboard in [OpenSearch Dashboards]({{ site.github.repository_url }}#DashboardsVisualizations) when forwarded to Malcolm. -![File quarantine](./images/hedgehog/images/file_quarantine.png) +Hedgehog Linux provides an extracted files directory listing to browse and download Zeek-extracted files. As this interface is primarily intended to be accessed through the Malcolm user interface, this service is accessible only by IP addresses [included in the ACL for artifact reachback from Malcolm](#HedgehogACL) over port `8006/tcp`. The next two questions indicate whether or not Zeek-extracted files downloaded through this interface will be archived using the ZIP file format and what encryption password should be used, if any (either the ZIP archive file password or as the encryption key for AES-256-CBC-encrypted files if not using ZIP). Please read the Malcolm documentation for [**Automatic file extraction and scanning - User interface**](file-scanning.md#ZeekFileExtractionUI) for more information on how to access preserved files. + +![Extracted file server configuration](./images/hedgehog/images/file_server_zip.png) -Finally, users will be presented with the list of configuration variables that will be used for capture, including the values which have been selected up to this point in this section. Upon choosing **OK** these values will be written back out to the sensor configuration file located at `/opt/sensor/sensor_ctl/control_vars.conf`. Editing this file manually is not recommended. After confirming these values, users will be presented with a confirmation that these settings have been written to the configuration file then returned to the welcome screen. +Finally, users will be presented with the list of configuration variables that will be used for capture, including the values which have been selected up to this point in this section. Upon choosing **OK** these values will be written back out to the sensor configuration file located at `/opt/sensor/sensor_ctl/control_vars.conf`. Editing this file manually should be done with care. After confirming these values, users will be presented with a confirmation that these settings have been written to the configuration file then returned to the welcome screen. ## Configure Forwarding @@ -506,10 +514,6 @@ Users will be asked to enter authentication credentials for the sensor's connect Users will be asked to provide a "password hash secret" for the Arkime viewer cluster. This value corresponds to the `passwordSecret` value in Arkime's [config.ini file](https://arkime.com/settings). Arkime uses this value to secure communication (specifically, the connection used when Arkime viewer retrieves a PCAP payload for display in its user interface) between Arkime viewers in instances of Malcolm and Hedgehog Linux. In other words, this value needs to be the same for the Malcolm instance and all of the instances of Hedgehog Linux forwarding Arkime sessions to that Malcolm instance. The corresponding value is set when [setting up authentication](#MalcolmAuthSetup) during the Malcolm configuration. 
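Since this "password hash secret" must be identical on the Malcolm instance and on every Hedgehog Linux sensor forwarding Arkime sessions to it, a simple read-only sanity check is to compare the value each side has configured. The path below is an assumption based on the `/opt/sensor/sensor_ctl` layout described in this document; treat the value as a secret when inspecting it:

```
# On the sensor (hypothetical path): show the currently configured passwordSecret.
grep '^passwordSecret=' /opt/sensor/sensor_ctl/arkime/config.ini
```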
-Users will be shown a dialog for a list of IP addresses used to populate an access control list (ACL) for hosts allowed to connect back to the sensor for retrieving session payloads from its PCAP files for display in Arkime viewer. The list will be prepopulated with the IP address entered a few screens prior to this one. - -![PCAP retrieval ACL](./images/hedgehog/images/malcolm_arkime_reachback_acl.png) - Arkime supports [compression](https://arkime.com/settings#writer-simple) for the PCAP files it creates. Select `none` (at the cost of requiring more storage for PCAP files saved on the sensor) or `zstd` (at the cost of higher CPU load when writing and reading PCAP files). If [`zstd`](https://en.wikipedia.org/wiki/Zstd?lang=en) is chosen, users will also be prompted for the compression level (something like `3` is probably a good choice). ![PCAP compression](./images/hedgehog/images/pcap_compression.png) @@ -576,6 +580,12 @@ The sensor uses [Fluent Bit](https://fluentbit.io/) to gather miscellaneous syst This forwarder's configuration is almost identical to that of [filebeat](#Hedgehogfilebeat) in the previous section. Select `miscbeat` from the forwarding configuration mode options and follow the same steps outlined above to set up this forwarder. +### acl-configure: Configure ACL for artifact reachback from Malcolm + +Users will be shown a dialog for a list of IP addresses used to populate a firewall access control list (ACL) for hosts allowed to connect back to the sensor for retrieving session payloads from its PCAP files (over port `8005/tcp`) for display in Arkime viewer and for downloading files (over port `8006/tcp`) [extracted and preserved by Zeek](#HedgehogZeekFileExtraction). The list will be prepopulated with the IP address entered a few screens prior to this one. + +![PCAP retrieval ACL](./images/hedgehog/images/malcolm_arkime_reachback_acl.png) + ### Autostart services Once the forwarders have been configured, the final step is to **Configure Autostart Services**. Choose this option from the configuration mode menu after the welcome screen of the sensor configuration tool. @@ -584,6 +594,7 @@ Despite configuring capture and/or forwarder services as described in previous s * **AUTOSTART_ARKIME** - [capture](#Hedgehogarkime-capture) PCAP engine for traffic capture, as well as traffic parsing and metadata insertion into OpenSearch for viewing in [Arkime](https://arkime.com/). If using Hedgehog Linux along with [Malcolm]({{ site.github.repository_url }}) or another Arkime installation, this is probably the preferable packet capture engine. 
* **AUTOSTART_CLAMAV_UPDATES** - Virus database update service for ClamAV (requires sensor to be connected to the Internet) +* **AUTOSTART_EXTRACTED_FILE_HTTP_SERVER** - the [HTTPS server](file-scanning.md#ZeekFileExtractionUI) providing access to the directory containing [Zeek-extracted files](#HedgehogZeekFileExtraction) * **AUTOSTART_FILEBEAT** - [filebeat](#Hedgehogfilebeat) Zeek and Suricata log forwarder * **AUTOSTART_FLUENTBIT_AIDE** - [Fluent Bit](https://fluentbit.io/) agent [monitoring](https://docs.fluentbit.io/manual/pipeline/inputs/exec) [AIDE](https://aide.github.io/) file system integrity checks * **AUTOSTART_FLUENTBIT_AUDITLOG** - [Fluent Bit](https://fluentbit.io/) agent [monitoring](https://docs.fluentbit.io/manual/pipeline/inputs/tail) [auditd](https://man7.org/linux/man-pages/man8/auditd.8.html) logs @@ -649,6 +660,18 @@ zeek:yara RUNNING pid 6548, uptime 0:03:17 zeek:zeekctl RUNNING pid 6502, uptime 0:03:17 ``` +### Managing disk usage + +In instances where Hedgehog Linux is deployed with the intention of running indefinitely, eventually the question arises of what to do when the file systems used for storing its artifacts (e.g., PCAP files, raw logs, [extracted files](file-scanning.md#ZeekFileExtraction), etc.) begin to fill up. Hedgehog Linux provides options for tuning the "aging out" (deletion) of old artifacts to make room for newer data. These are configured during [Configure Capture](#HedgehogCapture) and are stored in the `/opt/sensor/sensor_ctl/control_vars.conf` configuration file. Editing this file manually should be done with care. + +* PCAP files can be periodically [pruned]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/prune_files.sh) according to these variables: + - `PCAP_MAX_DISK_FILL` - a maximum fill percentage for the file system containing the PCAP files; in other words, if the disk is more than this percentage utilized, the prune condition triggers + - `PCAP_PRUNE_CHECK_SECONDS` - the interval between checking the PCAP prune condition, in seconds + - `ARKIME_FREESPACEG` - this value is [used by Arkime](https://arkime.com/settings#freespaceg) to determine when to delete the oldest PCAP files. Note that this variable represents the amount of free/unused/available space desired on the file system: e.g., a value of `5%` means "delete PCAP files if the amount of unused storage on the file system falls below 5%" (default `10%`). Observant users will note that there is overlap between Arkime's PCAP deletion process and the process using `PCAP_MAX_DISK_FILL` above: either process may delete old PCAP files depending on which condition triggers first. +* Zeek logs, files [extracted by Zeek](file-scanning.md#ZeekFileExtraction), and Suricata logs are [pruned]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/prune_files.sh) according to these variables: + - `ZEEK_MAX_DISK_FILL` - a maximum fill percentage for the file system containing these artifacts; in other words, if the disk is more than this percentage utilized, the prune condition triggers + - `ZEEK_PRUNE_CHECK_SECONDS` - the interval between checking the prune condition for these artifacts, in seconds + ## Verifying Traffic Capture and Forwarding The easiest way to verify network traffic is being captured by the sensor and forwarded to Malcolm is through Malcolm's Arkime [Sessions](arkime.md#ArkimeSessions) interface.
diff --git a/docs/malcolm-iso.md b/docs/malcolm-iso.md index 0d0d167a4..73f13f416 100644 --- a/docs/malcolm-iso.md +++ b/docs/malcolm-iso.md @@ -41,7 +41,7 @@ Building the ISO may take 30 minutes or more depending on the system. As the bui ``` … -Finished, created "/malcolm-build/malcolm-iso/malcolm-24.03.1.iso" +Finished, created "/malcolm-build/malcolm-iso/malcolm-24.04.0.iso" … ``` diff --git a/docs/quickstart.md b/docs/quickstart.md index 61990e4e2..1626b2df1 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -54,25 +54,25 @@ You can then observe the images have been retrieved by running `docker images`: ``` $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ghcr.io/idaholab/malcolm/api 24.03.1 xxxxxxxxxxxx 3 days ago 158MB -ghcr.io/idaholab/malcolm/arkime 24.03.1 xxxxxxxxxxxx 3 days ago 816MB -ghcr.io/idaholab/malcolm/dashboards 24.03.1 xxxxxxxxxxxx 3 days ago 1.02GB -ghcr.io/idaholab/malcolm/dashboards-helper 24.03.1 xxxxxxxxxxxx 3 days ago 184MB -ghcr.io/idaholab/malcolm/file-monitor 24.03.1 xxxxxxxxxxxx 3 days ago 588MB -ghcr.io/idaholab/malcolm/file-upload 24.03.1 xxxxxxxxxxxx 3 days ago 259MB -ghcr.io/idaholab/malcolm/filebeat-oss 24.03.1 xxxxxxxxxxxx 3 days ago 624MB -ghcr.io/idaholab/malcolm/freq 24.03.1 xxxxxxxxxxxx 3 days ago 132MB -ghcr.io/idaholab/malcolm/htadmin 24.03.1 xxxxxxxxxxxx 3 days ago 242MB -ghcr.io/idaholab/malcolm/logstash-oss 24.03.1 xxxxxxxxxxxx 3 days ago 1.35GB -ghcr.io/idaholab/malcolm/netbox 24.03.1 xxxxxxxxxxxx 3 days ago 1.01GB -ghcr.io/idaholab/malcolm/nginx-proxy 24.03.1 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/opensearch 24.03.1 xxxxxxxxxxxx 3 days ago 1.17GB -ghcr.io/idaholab/malcolm/pcap-capture 24.03.1 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/pcap-monitor 24.03.1 xxxxxxxxxxxx 3 days ago 213MB -ghcr.io/idaholab/malcolm/postgresql 24.03.1 xxxxxxxxxxxx 3 days ago 268MB -ghcr.io/idaholab/malcolm/redis 24.03.1 xxxxxxxxxxxx 3 days ago 34.2MB -ghcr.io/idaholab/malcolm/suricata 24.03.1 xxxxxxxxxxxx 3 days ago 278MB -ghcr.io/idaholab/malcolm/zeek 24.03.1 xxxxxxxxxxxx 3 days ago 1GB +ghcr.io/idaholab/malcolm/api 24.04.0 xxxxxxxxxxxx 3 days ago 158MB +ghcr.io/idaholab/malcolm/arkime 24.04.0 xxxxxxxxxxxx 3 days ago 816MB +ghcr.io/idaholab/malcolm/dashboards 24.04.0 xxxxxxxxxxxx 3 days ago 1.02GB +ghcr.io/idaholab/malcolm/dashboards-helper 24.04.0 xxxxxxxxxxxx 3 days ago 184MB +ghcr.io/idaholab/malcolm/file-monitor 24.04.0 xxxxxxxxxxxx 3 days ago 588MB +ghcr.io/idaholab/malcolm/file-upload 24.04.0 xxxxxxxxxxxx 3 days ago 259MB +ghcr.io/idaholab/malcolm/filebeat-oss 24.04.0 xxxxxxxxxxxx 3 days ago 624MB +ghcr.io/idaholab/malcolm/freq 24.04.0 xxxxxxxxxxxx 3 days ago 132MB +ghcr.io/idaholab/malcolm/htadmin 24.04.0 xxxxxxxxxxxx 3 days ago 242MB +ghcr.io/idaholab/malcolm/logstash-oss 24.04.0 xxxxxxxxxxxx 3 days ago 1.35GB +ghcr.io/idaholab/malcolm/netbox 24.04.0 xxxxxxxxxxxx 3 days ago 1.01GB +ghcr.io/idaholab/malcolm/nginx-proxy 24.04.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/opensearch 24.04.0 xxxxxxxxxxxx 3 days ago 1.17GB +ghcr.io/idaholab/malcolm/pcap-capture 24.04.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/pcap-monitor 24.04.0 xxxxxxxxxxxx 3 days ago 213MB +ghcr.io/idaholab/malcolm/postgresql 24.04.0 xxxxxxxxxxxx 3 days ago 268MB +ghcr.io/idaholab/malcolm/redis 24.04.0 xxxxxxxxxxxx 3 days ago 34.2MB +ghcr.io/idaholab/malcolm/suricata 24.04.0 xxxxxxxxxxxx 3 days ago 278MB +ghcr.io/idaholab/malcolm/zeek 24.04.0 xxxxxxxxxxxx 3 days ago 1GB ``` ### Import from pre-packaged tarballs diff --git 
a/docs/system-requirements.md b/docs/system-requirements.md index df0f4bfe3..a11571b2b 100644 --- a/docs/system-requirements.md +++ b/docs/system-requirements.md @@ -2,6 +2,6 @@ Malcolm runs on top of [Docker](https://www.docker.com/), which runs on recent releases of Linux, Apple [macOS](host-config-macos.md#HostSystemConfigMac), and [Microsoft Windows](host-config-windows.md#HostSystemConfigWindows) 10 and up. Malcolm can also be deployed in the cloud [with Kubernetes](kubernetes.md#Kubernetes). -To quote the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html), "If there is one resource that you will run out of first, it will likely be memory." Malcolm developers recommend a minimum of 8 cores and 16 gigabytes of RAM on a dedicated server. Malcolm can run on less, but more is better. Of course, users will want as much hard drive space as possible, as the amount of PCAP data a machine can analyze and store will be limited by its hard drive. +To quote the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html), "If there is one resource that you will run out of first, it will likely be memory." Malcolm requires a minimum of 8 CPU cores and 16 gigabytes of RAM on a dedicated server, but Malcolm developers recommend 16+ CPU cores and 32+ gigabytes of RAM for an optimal experience. Users will want as much available disk storage as possible (preferably solid state storage), as the amount of PCAP data a machine can analyze and store will be limited by available storage space. Arkime's wiki has documents ([here](https://github.com/arkime/arkime#hardware-requirements) and [here](https://github.com/arkime/arkime/wiki/FAQ#what-kind-of-capture-machines-should-we-buy) and [here](https://github.com/arkime/arkime/wiki/FAQ#how-many-elasticsearch-nodes-or-machines-do-i-need) and a [calculator here](https://arkime.com/estimators)) that may be helpful, although not everything in those documents will apply to a Docker-based setup such as Malcolm. \ No newline at end of file diff --git a/docs/ubuntu-install-example.md b/docs/ubuntu-install-example.md index 6c0b3893c..34767dee9 100644 --- a/docs/ubuntu-install-example.md +++ b/docs/ubuntu-install-example.md @@ -134,7 +134,7 @@ Store PCAP, log and index files in /home/user/Malcolm? (Y / n): y Enable index management policies (ILM/ISM) in Arkime? (y / N): n -Should Malcolm delete the oldest database indices and/or PCAP files based on available storage? n +Should Malcolm delete the oldest database indices and capture artifacts based on available storage? n Automatically analyze all PCAP files with Suricata? (Y / n): y @@ -257,25 +257,25 @@ Pulling zeek ...
done user@host:~/Malcolm$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ghcr.io/idaholab/malcolm/api 24.03.1 xxxxxxxxxxxx 3 days ago 158MB -ghcr.io/idaholab/malcolm/arkime 24.03.1 xxxxxxxxxxxx 3 days ago 816MB -ghcr.io/idaholab/malcolm/dashboards 24.03.1 xxxxxxxxxxxx 3 days ago 1.02GB -ghcr.io/idaholab/malcolm/dashboards-helper 24.03.1 xxxxxxxxxxxx 3 days ago 184MB -ghcr.io/idaholab/malcolm/file-monitor 24.03.1 xxxxxxxxxxxx 3 days ago 588MB -ghcr.io/idaholab/malcolm/file-upload 24.03.1 xxxxxxxxxxxx 3 days ago 259MB -ghcr.io/idaholab/malcolm/filebeat-oss 24.03.1 xxxxxxxxxxxx 3 days ago 624MB -ghcr.io/idaholab/malcolm/freq 24.03.1 xxxxxxxxxxxx 3 days ago 132MB -ghcr.io/idaholab/malcolm/htadmin 24.03.1 xxxxxxxxxxxx 3 days ago 242MB -ghcr.io/idaholab/malcolm/logstash-oss 24.03.1 xxxxxxxxxxxx 3 days ago 1.35GB -ghcr.io/idaholab/malcolm/netbox 24.03.1 xxxxxxxxxxxx 3 days ago 1.01GB -ghcr.io/idaholab/malcolm/nginx-proxy 24.03.1 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/opensearch 24.03.1 xxxxxxxxxxxx 3 days ago 1.17GB -ghcr.io/idaholab/malcolm/pcap-capture 24.03.1 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/pcap-monitor 24.03.1 xxxxxxxxxxxx 3 days ago 213MB -ghcr.io/idaholab/malcolm/postgresql 24.03.1 xxxxxxxxxxxx 3 days ago 268MB -ghcr.io/idaholab/malcolm/redis 24.03.1 xxxxxxxxxxxx 3 days ago 34.2MB -ghcr.io/idaholab/malcolm/suricata 24.03.1 xxxxxxxxxxxx 3 days ago 278MB -ghcr.io/idaholab/malcolm/zeek 24.03.1 xxxxxxxxxxxx 3 days ago 1GB +ghcr.io/idaholab/malcolm/api 24.04.0 xxxxxxxxxxxx 3 days ago 158MB +ghcr.io/idaholab/malcolm/arkime 24.04.0 xxxxxxxxxxxx 3 days ago 816MB +ghcr.io/idaholab/malcolm/dashboards 24.04.0 xxxxxxxxxxxx 3 days ago 1.02GB +ghcr.io/idaholab/malcolm/dashboards-helper 24.04.0 xxxxxxxxxxxx 3 days ago 184MB +ghcr.io/idaholab/malcolm/file-monitor 24.04.0 xxxxxxxxxxxx 3 days ago 588MB +ghcr.io/idaholab/malcolm/file-upload 24.04.0 xxxxxxxxxxxx 3 days ago 259MB +ghcr.io/idaholab/malcolm/filebeat-oss 24.04.0 xxxxxxxxxxxx 3 days ago 624MB +ghcr.io/idaholab/malcolm/freq 24.04.0 xxxxxxxxxxxx 3 days ago 132MB +ghcr.io/idaholab/malcolm/htadmin 24.04.0 xxxxxxxxxxxx 3 days ago 242MB +ghcr.io/idaholab/malcolm/logstash-oss 24.04.0 xxxxxxxxxxxx 3 days ago 1.35GB +ghcr.io/idaholab/malcolm/netbox 24.04.0 xxxxxxxxxxxx 3 days ago 1.01GB +ghcr.io/idaholab/malcolm/nginx-proxy 24.04.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/opensearch 24.04.0 xxxxxxxxxxxx 3 days ago 1.17GB +ghcr.io/idaholab/malcolm/pcap-capture 24.04.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/pcap-monitor 24.04.0 xxxxxxxxxxxx 3 days ago 213MB +ghcr.io/idaholab/malcolm/postgresql 24.04.0 xxxxxxxxxxxx 3 days ago 268MB +ghcr.io/idaholab/malcolm/redis 24.04.0 xxxxxxxxxxxx 3 days ago 34.2MB +ghcr.io/idaholab/malcolm/suricata 24.04.0 xxxxxxxxxxxx 3 days ago 278MB +ghcr.io/idaholab/malcolm/zeek 24.04.0 xxxxxxxxxxxx 3 days ago 1GB ``` Finally, start Malcolm. When Malcolm starts it will stream informational and debug messages to the console until it has completed initializing. diff --git a/docs/zeek-intel.md b/docs/zeek-intel.md index 1d853f66a..cf71d810c 100644 --- a/docs/zeek-intel.md +++ b/docs/zeek-intel.md @@ -13,9 +13,11 @@ Note that Malcolm does not manage updates for these intelligence files. Users us Adding and deleting intelligence files under this directory will take effect upon [restarting Malcolm](running.md#StopAndRestart). 
Alternately, users can use the `ZEEK_INTEL_REFRESH_CRON_EXPRESSION` environment variable containing a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to specify the interval at which the intel files should be refreshed. This can also be done manually without restarting Malcolm by running the following command from the Malcolm installation directory: ``` -docker compose exec --user $(id -u) zeek /usr/local/bin/entrypoint.sh true +docker compose exec --user $(id -u) zeek /usr/local/bin/docker_entrypoint.sh true ``` +As multiple instances of this container may be running in a Malcolm deployment (i.e., a `zeek-live` container for [monitoring local network interfaces](live-analysis.md#LocalPCAP) and a `zeek` container for scanning [uploaded PCAPs](upload.md#Upload)), only the non-live container is responsible for creating and managing the Zeek intel files, which are then shared and used by both types of container instances. + For a public example of Zeek intelligence files, see Critical Path Security's [repository](https://github.com/CriticalPathSecurity/Zeek-Intelligence-Feeds), which aggregates data from various other threat feeds into Zeek's format. ## STIX™ and TAXII™ diff --git a/file-monitor/scripts/.gitignore b/file-monitor/scripts/.gitignore deleted file mode 100644 index a2d7c8915..000000000 --- a/file-monitor/scripts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -malcolm_utils.py diff --git a/file-monitor/supervisord.conf b/file-monitor/supervisord.conf index 4ca505d7a..4b8dbcea7 100644 --- a/file-monitor/supervisord.conf +++ b/file-monitor/supervisord.conf @@ -170,6 +170,24 @@ stdout_logfile=/dev/fd/1 stdout_logfile_maxbytes=0 redirect_stderr=true +[program:prune] +command=/usr/local/bin/prune_files.sh + -i %(ENV_EXTRACTED_FILE_PRUNE_INTERVAL_SECONDS)s + -m %(ENV_EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE)s + -t %(ENV_EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT)s + -p /zeek/extract_files + -r +autostart=true +autorestart=true +startsecs=0 +startretries=0 +stopasgroup=true +killasgroup=true +directory=/zeek/extract_files +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +redirect_stderr=true + [program:cron] command=/usr/local/bin/supercronic -json "%(ENV_SUPERCRONIC_CRONTAB)s" autostart=true diff --git a/hedgehog-iso/arkime/Dockerfile b/hedgehog-iso/arkime/Dockerfile index 4ef89f0da..6e72fc713 100644 --- a/hedgehog-iso/arkime/Dockerfile +++ b/hedgehog-iso/arkime/Dockerfile @@ -6,7 +6,7 @@ LABEL maintainer="malcolm@inl.gov" ENV DEBIAN_FRONTEND noninteractive -ENV ARKIME_VERSION "5.0.1" +ENV ARKIME_VERSION "5.1.2" ENV ARKIME_DIR "/opt/arkime" RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sources && \ diff --git a/hedgehog-iso/arkime/build-arkime-deb.sh b/hedgehog-iso/arkime/build-arkime-deb.sh index e1b43f15e..944fe4959 100755 --- a/hedgehog-iso/arkime/build-arkime-deb.sh +++ b/hedgehog-iso/arkime/build-arkime-deb.sh @@ -4,6 +4,7 @@ ARKIME_URL="https://github.com/arkime/arkime.git" OUTPUT_DIR="/tmp" +DEBARCH=$(dpkg --print-architecture) unset VERBOSE while getopts o:v opts; do @@ -33,6 +34,10 @@ export PATH="$ARKIME_DIR/bin:/tmp/arkime-$ARKIME_VERSION/node_modules/.bin:${PAT make install +mkdir -p "${ARKIME_DIR}"/plugins +curl -fsSL -o "${ARKIME_DIR}/plugins/ja4plus.${DEBARCH}.so" "https://github.com/arkime/arkime/releases/download/v${ARKIME_VERSION}/ja4plus.${DEBARCH}.so" +[[ -f "${ARKIME_DIR}/plugins/ja4plus.${DEBARCH}.so" ]] && chmod 755 "${ARKIME_DIR}/plugins/ja4plus.${DEBARCH}.so" + cp -r ./capture/plugins/lua/samples 
"$ARKIME_DIR"/lua cat NOTICE release/CAPTURENOTICE > $ARKIME_DIR/NOTICE.txt diff --git a/hedgehog-iso/build.sh b/hedgehog-iso/build.sh index 201d09f78..d04bb6910 100755 --- a/hedgehog-iso/build.sh +++ b/hedgehog-iso/build.sh @@ -5,7 +5,7 @@ IMAGE_PUBLISHER=cisagov IMAGE_VERSION=1.0.0 IMAGE_DISTRIBUTION=bookworm -BEATS_VER="8.12.1" +BEATS_VER="8.13.2" BEATS_OSS="-oss" BUILD_ERROR_CODE=1 @@ -114,6 +114,17 @@ if [ -d "$WORKDIR" ]; then chown -R root:root ./config/includes.chroot/usr/local/bin/ ./config/includes.chroot/opt/zeek/bin/ rsync -a "$SCRIPT_PATH/suricata/" ./config/includes.chroot/opt/sensor/sensor_ctl/suricata/ + # assets for extracted file server + mkdir -p ./config/includes.chroot/opt/sensor/assets/img/ + rsync -a "$SCRIPT_PATH/nginx/" ./config/includes.chroot/opt/sensor/assets/ + cp "$SCRIPT_PATH"/docs/images/hedgehog/logo/favicon.ico ./config/includes.chroot/opt/sensor/assets/ + cp "$SCRIPT_PATH"/docs/images/hedgehog/logo/hedgehog-wallpaper-plain.png ./config/includes.chroot/opt/sensor/assets/img/bg-masthead.png + bash "$SCRIPT_PATH/shared/bin/web-ui-asset-download.sh" -o ./config/includes.chroot/opt/sensor/assets/css/ + chown -R root:root ./config/includes.chroot/opt/sensor/assets/css/ + find ./config/includes.chroot/opt/sensor/assets/ -type d -exec chmod 755 "{}" \; + find ./config/includes.chroot/opt/sensor/assets/ -type f -exec chmod 644 "{}" \; + ln -s -r ./config/includes.chroot/opt/sensor/assets ./config/includes.chroot/opt/sensor/assets/assets + # write out some version stuff specific to this installation version echo "BUILD_ID=\"$(date +'%Y-%m-%d')-${IMAGE_VERSION}\"" > ./config/includes.chroot/opt/sensor/.os-info echo "VARIANT=\"Hedgehog Linux (Sensor) v${IMAGE_VERSION}\"" >> ./config/includes.chroot/opt/sensor/.os-info diff --git a/hedgehog-iso/build_via_vagrant.sh b/hedgehog-iso/build_via_vagrant.sh index 73c557598..26197106f 100755 --- a/hedgehog-iso/build_via_vagrant.sh +++ b/hedgehog-iso/build_via_vagrant.sh @@ -30,6 +30,7 @@ function cleanup_shared_and_docs { "$SCRIPT_PATH"/Gemfile \ "$SCRIPT_PATH"/README.md \ "$SCRIPT_PATH"/suricata \ + "$SCRIPT_PATH"/nginx \ "$SCRIPT_PATH"/htpdate } @@ -90,8 +91,10 @@ cp -r "$SCRIPT_PATH"/../shared \ "$SCRIPT_PATH"/../README.md "$SCRIPT_PATH"/ cp "$SCRIPT_PATH"/../scripts/documentation_build.sh "$SCRIPT_PATH"/docs/ cp "$SCRIPT_PATH"/../scripts/malcolm_utils.py "$SCRIPT_PATH"/shared/bin/ -mkdir "$SCRIPT_PATH"/suricata +mkdir "$SCRIPT_PATH"/nginx "$SCRIPT_PATH"/suricata cp -r "$SCRIPT_PATH"/../suricata/rules-default "$SCRIPT_PATH"/suricata/ +cp -r "$SCRIPT_PATH"/../nginx/landingpage/css "$SCRIPT_PATH"/nginx/ +cp -r "$SCRIPT_PATH"/../nginx/landingpage/js "$SCRIPT_PATH"/nginx/ YML_IMAGE_VERSION="$(grep -P "^\s+image:.*/malcolm/" "$SCRIPT_PATH"/../docker-compose.yml | awk '{print $2}' | cut -d':' -f2 | uniq -c | sort -nr | awk '{print $2}' | head -n 1)" [[ -n $YML_IMAGE_VERSION ]] && echo "$YML_IMAGE_VERSION" > "$SCRIPT_PATH"/shared/version.txt diff --git a/hedgehog-iso/config/hooks/normal/0169-pip-installs.hook.chroot b/hedgehog-iso/config/hooks/normal/0169-pip-installs.hook.chroot index 3a217625c..981f3ffcb 100755 --- a/hedgehog-iso/config/hooks/normal/0169-pip-installs.hook.chroot +++ b/hedgehog-iso/config/hooks/normal/0169-pip-installs.hook.chroot @@ -12,9 +12,12 @@ pip3 install --break-system-packages --no-compile --no-cache-dir --force-reinsta clamd \ dateparser \ debinterface \ + dominate \ + humanfriendly \ pymisp \ python-dotenv \ ruamel.yaml \ stix2 \ + stream-zip \ taxii2-client \ watchdog diff --git 
a/hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_arkime_viewer b/hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_sensor_services similarity index 53% rename from hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_arkime_viewer rename to hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_sensor_services index 55f79d5e2..f89d43f29 100644 --- a/hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_arkime_viewer +++ b/hedgehog-iso/config/includes.chroot/etc/sudoers.d/ufw_sensor_services @@ -1,2 +1,2 @@ # allow unprivileged mgmt of UFW access for the local Arkime viewer instance -%netdev ALL=(root) NOPASSWD: /usr/local/bin/ufw_allow_viewer.sh +%netdev ALL=(root) NOPASSWD: /usr/local/bin/ufw_allow_requests.sh diff --git a/hedgehog-iso/config/package-lists/system.list.chroot b/hedgehog-iso/config/package-lists/system.list.chroot index 49f26efda..ba8ebf56b 100644 --- a/hedgehog-iso/config/package-lists/system.list.chroot +++ b/hedgehog-iso/config/package-lists/system.list.chroot @@ -112,6 +112,7 @@ mcrypt md5deep menu miscfiles +mmv moreutils mtools multitail diff --git a/hedgehog-iso/interface/requirements.txt b/hedgehog-iso/interface/requirements.txt index 70bc6aa40..3f35620d4 100644 --- a/hedgehog-iso/interface/requirements.txt +++ b/hedgehog-iso/interface/requirements.txt @@ -3,8 +3,8 @@ chardet==5.1.0 click==8.1.3 Flask==2.3.2 Flask-Cors==3.0.10 -gunicorn==20.1.0 -idna==3.4 +gunicorn==22.0.0 +idna==3.7 itsdangerous==2.1.2 Jinja2==3.1.3 MarkupSafe==2.1.2 diff --git a/hedgehog-iso/interface/sensor_ctl/arkime/config.ini b/hedgehog-iso/interface/sensor_ctl/arkime/config.ini index 47936405e..2d55431ea 100644 --- a/hedgehog-iso/interface/sensor_ctl/arkime/config.ini +++ b/hedgehog-iso/interface/sensor_ctl/arkime/config.ini @@ -35,6 +35,7 @@ parseSMB=true parseSMTP=true passwordSecret=Malcolm pcapDir=/tmp +plugins= pluginsDir=/dummy/plugins reqBodyOnlyUtf8=true rirFile=/dummy/ipv4-address-space.csv diff --git a/hedgehog-iso/interface/sensor_ctl/clean.sh b/hedgehog-iso/interface/sensor_ctl/clean.sh index 047b1d220..2d253f975 100755 --- a/hedgehog-iso/interface/sensor_ctl/clean.sh +++ b/hedgehog-iso/interface/sensor_ctl/clean.sh @@ -11,11 +11,11 @@ CONTROL_VARS_FILE="control_vars.conf" source "$CONTROL_VARS_FILE" if [ -n "$PCAP_PATH" ] && [ "$PCAP_PATH" != "/" ] && [ -d "$PCAP_PATH" ] ; then - PCAP_SIZE="$(du -sh "$PCAP_PATH"/ | cut -f1)" + PCAP_SIZE="$(du -sh "$PCAP_PATH"/ 2>/dev/null | cut -f1)" rm -rf "$PCAP_PATH"/* && echo "Removed $PCAP_SIZE from packet capture path" fi if [ -n "$ZEEK_LOG_PATH" ] && [ "$ZEEK_LOG_PATH" != "/" ] && [ -d "$ZEEK_LOG_PATH" ] ; then - ZEEK_SIZE="$(du -sh "$ZEEK_LOG_PATH"/ | cut -f1)" + ZEEK_SIZE="$(du -sh "$ZEEK_LOG_PATH"/ 2>/dev/null | cut -f1)" rm -rf "$ZEEK_LOG_PATH"/* && echo "Removed $ZEEK_SIZE from Zeek log path" fi diff --git a/hedgehog-iso/interface/sensor_ctl/control_vars.conf b/hedgehog-iso/interface/sensor_ctl/control_vars.conf index 66a9ec266..bcef009a0 100644 --- a/hedgehog-iso/interface/sensor_ctl/control_vars.conf +++ b/hedgehog-iso/interface/sensor_ctl/control_vars.conf @@ -12,7 +12,6 @@ export PCAP_PRUNE_CHECK_SECONDS=60 export ARKIME_VIEWER_PORT=8005 export ARKIME_PACKET_THREADS=5 -export ARKIME_PACKET_ACL= export ARKIME_ECS_PROVIDER=arkime export ARKIME_ECS_DATASET=session export ARKIME_COMPRESSION_TYPE=zstd @@ -26,6 +25,28 @@ export ARKIME_FREESPACEG=7% export ARKIME_ROTATE_INDEX=daily export ARKIME_DEBUG_LEVEL=0 +# AUTOSTART_EXTRACTED_FILE_HTTP_SERVER below controls whether or not to serve the +# directory containing 
Zeek-extracted over HTTP at ./extracted-files/ +export EXTRACTED_FILE_HTTP_SERVER_PORT=8006 +export EXTRACTED_FILE_HTTP_ASSETS_DIR=/opt/sensor/assets +# Whether or not Zeek-extracted files served over HTTP will be archived in a Zip file +export EXTRACTED_FILE_HTTP_SERVER_ZIP=false +# Specifies the password for encrypted Zeek-extracted files served over HTTP +# If EXTRACTED_FILE_HTTP_SERVER_ZIP is true this is the password for the Zip file, +# otherwise it is the AES-256-CBC decryption password +export EXTRACTED_FILE_HTTP_SERVER_KEY=infected +# Whether or not to use libmagic to show MIME types for Zeek-extracted files served +export EXTRACTED_FILE_HTTP_SERVER_MAGIC=false +# HTTP server will look in subdirectories for requested filename (e.g., in "/quarantined" and "/preserved") +export EXTRACTED_FILE_HTTP_SERVER_RECURSIVE=true + +# files used for FileBeat -> Logstash TLS and extracted file HTTP server +export BEAT_LS_SSL_CLIENT_CRT=/opt/sensor/sensor_ctl/logstash-client-certificates/client.crt +export BEAT_LS_SSL_CLIENT_KEY=/opt/sensor/sensor_ctl/logstash-client-certificates/client.key +export BEAT_LS_SSL_CA_CRT=/opt/sensor/sensor_ctl/logstash-client-certificates/ca.crt + +export MALCOLM_REQUEST_ACL= +export MALCOLM_REQUEST_PORTS=$ARKIME_VIEWER_PORT,$EXTRACTED_FILE_HTTP_SERVER_PORT export DOCUMENTATION_PORT=8420 export MISCBEAT_PORT=9516 export FLUENTBIT_METRICS_INTERVAL=30 @@ -50,6 +71,8 @@ export ZEEK_AF_PACKET_BUFFER_SIZE=67108864 export ZEEK_LOCAL_NETS= export ZEEK_JSON= export ZEEK_RULESET=local +export ZEEK_INTEL_REFRESH_ON_DEPLOY=true +export ZEEK_INTEL_REFRESH_CRON_EXPRESSION= export ZEEK_INTEL_ITEM_EXPIRATION=-1min export ZEEK_INTEL_FEED_SINCE= export ZEEK_EXTRACTOR_MODE=none @@ -146,6 +169,7 @@ export ZEEK_FILE_SCAN_CAPA=false export AUTOSTART_ARKIME=false export AUTOSTART_CLAMAV_UPDATES=false +export AUTOSTART_EXTRACTED_FILE_HTTP_SERVER=false export AUTOSTART_FILEBEAT=false export AUTOSTART_FLUENTBIT_AIDE=false export AUTOSTART_FLUENTBIT_AUDITLOG=false diff --git a/hedgehog-iso/interface/sensor_ctl/supervisor.d/zeek.conf b/hedgehog-iso/interface/sensor_ctl/supervisor.d/zeek.conf index 6148fa342..519e4ecf7 100644 --- a/hedgehog-iso/interface/sensor_ctl/supervisor.d/zeek.conf +++ b/hedgehog-iso/interface/sensor_ctl/supervisor.d/zeek.conf @@ -1,5 +1,5 @@ [group:zeek] -programs=zeekctl,watcher,virustotal,clamav,yara,capa,logger +programs=zeekctl,watcher,virustotal,clamav,yara,capa,logger,fileserve [program:zeekctl] command=/opt/zeek/bin/zeekdeploy.sh @@ -101,3 +101,22 @@ autostart=%(ENV_ZEEK_FILE_WATCH)s autorestart=%(ENV_ZEEK_FILE_WATCH)s directory=%(ENV_ZEEK_LOG_PATH)s user=sensor + +[program:fileserve] +command=/usr/local/bin/extracted_files_http_server.py + --port %(ENV_EXTRACTED_FILE_HTTP_SERVER_PORT)s + --zip %(ENV_EXTRACTED_FILE_HTTP_SERVER_ZIP)s + --recursive %(ENV_EXTRACTED_FILE_HTTP_SERVER_RECURSIVE)s + --directory "%(ENV_ZEEK_LOG_PATH)s/extract_files" + --assets-directory "%(ENV_EXTRACTED_FILE_HTTP_ASSETS_DIR)s" + --tls + --tls-certfile "%(ENV_BEAT_LS_SSL_CLIENT_CRT)s" + --tls-keyfile "%(ENV_BEAT_LS_SSL_CLIENT_KEY)s" +startsecs=30 +startretries=3 +stopasgroup=true +killasgroup=true +autostart=%(ENV_AUTOSTART_EXTRACTED_FILE_HTTP_SERVER)s +autorestart=%(ENV_AUTOSTART_EXTRACTED_FILE_HTTP_SERVER)s +directory=%(ENV_ZEEK_LOG_PATH)s/extract_files +user=sensor diff --git a/hedgehog-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh b/hedgehog-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh index 41273b6b9..53077c8d1 100644 --- 
a/hedgehog-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh +++ b/hedgehog-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh @@ -88,6 +88,21 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -r "$SUPERVISOR_PATH"/arkime/config.ini ]]; t sed -r -i "s/(debug)\s*=\s*.*/\1=$ARKIME_DEBUG_LEVEL/" "$ARKIME_CONFIG_FILE" fi + # enable ja4+ plugin if it's present + JA4_PLUGIN_FILE="/opt/arkime/plugins/ja4plus.$(dpkg --print-architecture).so" + if [[ -f "${JA4_PLUGIN_FILE}" ]]; then + JA4_PLUGIN_FILE_BASE="$(basename "${JA4_PLUGIN_FILE}")" + JA4_PLUGIN_FILE_ESCAPED="$(echo "${JA4_PLUGIN_FILE_BASE}" | sed 's@\.@\\\.@g')" + # clean up old references to the plugin + sed -i "/plugins=.*${JA4_PLUGIN_FILE_ESCAPED}/s/;\?${JA4_PLUGIN_FILE_ESCAPED}//g" "$ARKIME_CONFIG_FILE" + # append ja4 plugin filename to end of plugins= line in config file and uncomment it if necessary + sed -i "s/^#*[[:space:]]*\(plugins=\)/\1${JA4_PLUGIN_FILE_BASE};/" "$ARKIME_CONFIG_FILE" + # squash semicolons + sed -i 's/;\{2,\}/;/g' "$ARKIME_CONFIG_FILE" + # remove trailing semicolon from plugins= line if it exists + sed -i "s/^\(plugins=.*\)[[:space:]]*;[[:space:]]*$/\1/" "$ARKIME_CONFIG_FILE" + fi + # identify node in session metadata for PCAP reachback PRIMARY_IP=$(ip route get 255.255.255.255 | grep -Po '(?<=src )(\d{1,3}.){4}' | sed "s/ //g") export ARKIME_NODE_NAME="$(hostname --long)" @@ -112,7 +127,7 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -r "$SUPERVISOR_PATH"/arkime/config.ini ]]; t fi # update the firewall ACL (via ufw) to allow retrieval of packets - sudo --non-interactive /usr/local/bin/ufw_allow_viewer.sh + sudo --non-interactive /usr/local/bin/ufw_allow_requests.sh # make sure interface flags are set appropriately for capture if [[ -n $CAPTURE_INTERFACE ]]; then diff --git a/hedgehog-iso/interface/sensor_ctl/supervisor.init/supercronic_populate.sh b/hedgehog-iso/interface/sensor_ctl/supervisor.init/suricata_update_cron_setup.sh similarity index 91% rename from hedgehog-iso/interface/sensor_ctl/supervisor.init/supercronic_populate.sh rename to hedgehog-iso/interface/sensor_ctl/supervisor.init/suricata_update_cron_setup.sh index a4eb73573..6df9390e2 100644 --- a/hedgehog-iso/interface/sensor_ctl/supervisor.init/supercronic_populate.sh +++ b/hedgehog-iso/interface/sensor_ctl/supervisor.init/suricata_update_cron_setup.sh @@ -4,9 +4,10 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -d "$SUPERVISOR_PATH"/supercronic ]]; then - # clear out supercronic crontab and repopulate based on autostart variables + # clear out suricata-update from crontab and repopulate based on autostart variables CRONTAB_PATH="$SUPERVISOR_PATH"/supercronic/crontab - > "$CRONTAB_PATH" + touch "$CRONTAB_PATH" + sed -i -e "/suricata-update/d" "$CRONTAB_PATH" # suricata updates if [[ "${AUTOSTART_SURICATA_UPDATES:-false}" == "true" ]] && \ @@ -47,4 +48,7 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -d "$SUPERVISOR_PATH"/supercronic ]]; then echo "${SURICATA_REFRESH_CRON_EXPRESSION:-15 2 * * *} /usr/bin/suricata-update --config \"$SURICATA_UPDATE_CONFIG_FILE\" --suricata-conf \"$SURICATA_CONFIG_FILE\" --data-dir \"${SURICATA_MANAGED_DIR:-/var/lib/suricata}\" $ETOPEN_FLAG" >> "$CRONTAB_PATH" fi # suricata updates + + # reload supercronic if it's running + killall -s USR2 supercronic >/dev/null 2>&1 || true fi diff --git a/hedgehog-raspi/generate-recipe.py b/hedgehog-raspi/generate-recipe.py index 4ad1535c7..b4c8db00e 100755 --- a/hedgehog-raspi/generate-recipe.py +++ b/hedgehog-raspi/generate-recipe.py @@ -92,7 +92,7 @@ # Nothing yet! 
extra_root_shell_cmds = [ 'cp sensor_install.sh "${ROOT?}/root/"', - '/bin/bash -c \'mkdir -p "${ROOT?}/opt/"{buildshared,deps,hooks,patches,sensor/sensor_ctl/suricata/rules-default,arkime/etc,zeek/bin}\'', + '/bin/bash -c \'mkdir -p "${ROOT?}/opt/"{sensor/assets/img,buildshared,deps,hooks,patches,sensor/sensor_ctl/suricata/rules-default,arkime/etc,zeek/bin}\'', 'cp "%s/arkime/patch/"* "${ROOT?}/opt/patches/" || true' % MALCOLM_DIR, 'cp "%s/arkime/etc/"* "${ROOT?}/opt/arkime/etc" || true' % SENSOR_DIR, 'cp -r "%s/suricata/rules-default/"* "${ROOT?}/opt/sensor/sensor_ctl/suricata/rules-default/" || true' @@ -110,6 +110,11 @@ 'cp -r "%s/config/hooks/normal/"* "${ROOT?}/opt/hooks/"' % SENSOR_DIR, 'cp -r "%s/config/package-lists/"* "${ROOT?}/opt/deps/"' % SENSOR_DIR, 'cp -r "%s/docs/images/hedgehog/logo/hedgehog-ascii-text.txt"* "${ROOT?}/root/"' % MALCOLM_DIR, + 'cp -r "%s/nginx/landingpage/css/" "${ROOT?}/opt/sensor/assets/"' % MALCOLM_DIR, + 'cp -r "%s/nginx/landingpage/js/" "${ROOT?}/opt/sensor/assets/"' % MALCOLM_DIR, + 'cp -r "%s/docs/images/hedgehog/logo/favicon.ico" "${ROOT?}/opt/sensor/assets/"' % MALCOLM_DIR, + 'cp -r "%s/docs/images/hedgehog/logo/hedgehog-wallpaper-plain.png" "${ROOT?}/opt/sensor/assets/img/bg-masthead.png"' + % MALCOLM_DIR, ] # Extend list just in case version is 4 diff --git a/hedgehog-raspi/sensor_install.sh b/hedgehog-raspi/sensor_install.sh index c5ea0837c..adafc82a0 100644 --- a/hedgehog-raspi/sensor_install.sh +++ b/hedgehog-raspi/sensor_install.sh @@ -32,7 +32,9 @@ SHARED_DIR='/opt/buildshared' WORK_DIR="$(mktemp -d -t hedgehog-XXXXXX)" SENSOR_DIR='/opt/sensor' -BEATS_VER="8.12.1" +ARKIME_VERSION="5.1.2" + +BEATS_VER="8.13.2" BEATS_OSS="-oss" # Option to build from sources if desired @@ -58,7 +60,7 @@ BUILD_ERROR_CODE=1 build_arkime(){ mkdir -p /tmp/arkime-deb - arkime_ver='5.0.1-1' + arkime_ver="${ARKIME_VERSION}-1" curl -sSL -o /tmp/arkime-deb/arkime.deb "https://github.com/arkime/arkime/releases/download/v5.0.0/arkime_${arkime_ver}.ubuntu2204_arm64.deb" dpkg -i /tmp/arkime-deb/*.deb || apt-get -f install -y --no-install-suggests } @@ -66,7 +68,7 @@ build_arkime(){ build_arkime_src(){ arkime_repo='https://github.com/arkime/arkime.git' - arkime_ver='5.0.1' + arkime_ver="${ARKIME_VERSION}" arkime_dir='/opt/arkime' build_jobs=$((PROC_CNT/2)) @@ -397,6 +399,19 @@ install_files() { curl -s -S -L -o ./oui.txt "https://www.wireshark.org/download/automated/data/manuf" popd >/dev/null 2>&1 + # download ja4+ plugin + mkdir -p /opt/arkime/plugins + pushd /opt/arkime/plugins >/dev/null 2>&1 + curl -sSL -o "/opt/arkime/plugins/ja4plus.${ARCH}.so" "https://github.com/arkime/arkime/releases/download/v$ARKIME_VERSION/ja4plus.$ARCH.so" || true + [[ -f "/opt/arkime/plugins/ja4plus.${ARCH}.so" ]] && chmod 755 "/opt/arkime/plugins/ja4plus.${ARCH}.so" + popd >/dev/null 2>&1 + + # download assets for extracted file server + /usr/local/bin/web-ui-asset-download.sh -o /opt/sensor/assets/css + find /opt/sensor/assets -type d -exec chmod 755 "{}" \; + find /opt/sensor/assets -type f -exec chmod 644 "{}" \; + ln -s -r /opt/sensor/assets /opt/sensor/assets/assets + # Prepare Fluentbit and Beats repo GPG keys local apt_lists='/etc/apt/sources.list.d' local apt_keys='/etc/apt/keyrings' diff --git a/kubernetes/03-opensearch.yml b/kubernetes/03-opensearch.yml index d33197848..66444a731 100644 --- a/kubernetes/03-opensearch.yml +++ b/kubernetes/03-opensearch.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: opensearch-container - image: ghcr.io/idaholab/malcolm/opensearch:24.03.1 + 
image: ghcr.io/idaholab/malcolm/opensearch:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -71,7 +71,7 @@ spec: subPath: "opensearch" initContainers: - name: opensearch-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/04-dashboards.yml b/kubernetes/04-dashboards.yml index 47f759213..07f1abe6a 100644 --- a/kubernetes/04-dashboards.yml +++ b/kubernetes/04-dashboards.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: dashboards-container - image: ghcr.io/idaholab/malcolm/dashboards:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/05-upload.yml b/kubernetes/05-upload.yml index ae65af467..66d8440fd 100644 --- a/kubernetes/05-upload.yml +++ b/kubernetes/05-upload.yml @@ -34,7 +34,7 @@ spec: spec: containers: - name: upload-container - image: ghcr.io/idaholab/malcolm/file-upload:24.03.1 + image: ghcr.io/idaholab/malcolm/file-upload:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -73,7 +73,7 @@ spec: subPath: "upload" initContainers: - name: upload-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/06-pcap-monitor.yml b/kubernetes/06-pcap-monitor.yml index c14afabbd..e73861876 100644 --- a/kubernetes/06-pcap-monitor.yml +++ b/kubernetes/06-pcap-monitor.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: pcap-monitor-container - image: ghcr.io/idaholab/malcolm/pcap-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-monitor:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -70,7 +70,7 @@ spec: name: pcap-monitor-zeek-volume initContainers: - name: pcap-monitor-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/07-arkime.yml b/kubernetes/07-arkime.yml index 2e8b35855..4cf5aeffc 100644 --- a/kubernetes/07-arkime.yml +++ b/kubernetes/07-arkime.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: arkime-container - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -79,7 +79,7 @@ spec: name: arkime-pcap-volume initContainers: - name: arkime-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/08-api.yml b/kubernetes/08-api.yml index 457a81a37..3a5312a12 100644 --- a/kubernetes/08-api.yml +++ b/kubernetes/08-api.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: api-container - image: ghcr.io/idaholab/malcolm/api:24.03.1 + image: ghcr.io/idaholab/malcolm/api:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/09-dashboards-helper.yml b/kubernetes/09-dashboards-helper.yml index aa03d1cab..91dc8fc08 100644 --- a/kubernetes/09-dashboards-helper.yml +++ b/kubernetes/09-dashboards-helper.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: dashboards-helper-container - image: ghcr.io/idaholab/malcolm/dashboards-helper:24.03.1 + image: ghcr.io/idaholab/malcolm/dashboards-helper:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/10-zeek.yml b/kubernetes/10-zeek.yml index 92f5e0b80..f8dfba8c1 100644 
--- a/kubernetes/10-zeek.yml +++ b/kubernetes/10-zeek.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: zeek-offline-container - image: ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -64,7 +64,7 @@ spec: subPath: "zeek/intel" initContainers: - name: zeek-offline-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/11-suricata.yml b/kubernetes/11-suricata.yml index df5e97054..b8ed268a7 100644 --- a/kubernetes/11-suricata.yml +++ b/kubernetes/11-suricata.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: suricata-offline-container - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -55,7 +55,7 @@ spec: name: suricata-offline-custom-configs-volume initContainers: - name: suricata-offline-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/12-file-monitor.yml b/kubernetes/12-file-monitor.yml index bc55fb0e0..44071e816 100644 --- a/kubernetes/12-file-monitor.yml +++ b/kubernetes/12-file-monitor.yml @@ -33,7 +33,7 @@ spec: spec: containers: - name: file-monitor-container - image: ghcr.io/idaholab/malcolm/file-monitor:24.03.1 + image: ghcr.io/idaholab/malcolm/file-monitor:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: name: file-monitor-yara-rules-custom-volume initContainers: - name: file-monitor-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/13-filebeat.yml b/kubernetes/13-filebeat.yml index c9eb11c99..de2290c40 100644 --- a/kubernetes/13-filebeat.yml +++ b/kubernetes/13-filebeat.yml @@ -33,7 +33,7 @@ spec: spec: containers: - name: filebeat-container - image: ghcr.io/idaholab/malcolm/filebeat-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/filebeat-oss:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: subPath: "nginx" initContainers: - name: filebeat-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/14-logstash.yml b/kubernetes/14-logstash.yml index ac26d7f2f..dda6ca561 100644 --- a/kubernetes/14-logstash.yml +++ b/kubernetes/14-logstash.yml @@ -49,7 +49,7 @@ spec: # topologyKey: "kubernetes.io/hostname" containers: - name: logstash-container - image: ghcr.io/idaholab/malcolm/logstash-oss:24.03.1 + image: ghcr.io/idaholab/malcolm/logstash-oss:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -115,7 +115,7 @@ spec: subPath: "logstash" initContainers: - name: logstash-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/15-netbox-redis.yml b/kubernetes/15-netbox-redis.yml index e0056f3c8..5c8d1b2e1 100644 --- a/kubernetes/15-netbox-redis.yml +++ b/kubernetes/15-netbox-redis.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-redis-container - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 
imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: subPath: netbox/redis initContainers: - name: netbox-redis-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/16-netbox-redis-cache.yml b/kubernetes/16-netbox-redis-cache.yml index 84dcffb87..5a03e6595 100644 --- a/kubernetes/16-netbox-redis-cache.yml +++ b/kubernetes/16-netbox-redis-cache.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-redis-cache-container - image: ghcr.io/idaholab/malcolm/redis:24.03.1 + image: ghcr.io/idaholab/malcolm/redis:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/17-netbox-postgres.yml b/kubernetes/17-netbox-postgres.yml index 587c4aa72..3e8b801ca 100644 --- a/kubernetes/17-netbox-postgres.yml +++ b/kubernetes/17-netbox-postgres.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-postgres-container - image: ghcr.io/idaholab/malcolm/postgresql:24.03.1 + image: ghcr.io/idaholab/malcolm/postgresql:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -74,7 +74,7 @@ spec: subPath: netbox/postgres initContainers: - name: netbox-postgres-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/18-netbox.yml b/kubernetes/18-netbox.yml index 6a3f4c8f6..29e9d3ff5 100644 --- a/kubernetes/18-netbox.yml +++ b/kubernetes/18-netbox.yml @@ -36,7 +36,7 @@ spec: spec: containers: - name: netbox-container - image: ghcr.io/idaholab/malcolm/netbox:24.03.1 + image: ghcr.io/idaholab/malcolm/netbox:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -88,7 +88,7 @@ spec: subPath: netbox/media initContainers: - name: netbox-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/19-htadmin.yml b/kubernetes/19-htadmin.yml index 2a2015ce0..165f90cbb 100644 --- a/kubernetes/19-htadmin.yml +++ b/kubernetes/19-htadmin.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: htadmin-container - image: ghcr.io/idaholab/malcolm/htadmin:24.03.1 + image: ghcr.io/idaholab/malcolm/htadmin:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -63,7 +63,7 @@ spec: subPath: "htadmin" initContainers: - name: htadmin-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/20-pcap-capture.yml b/kubernetes/20-pcap-capture.yml index 35a46368c..e7a8fe24c 100644 --- a/kubernetes/20-pcap-capture.yml +++ b/kubernetes/20-pcap-capture.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: pcap-capture-container - image: ghcr.io/idaholab/malcolm/pcap-capture:24.03.1 + image: ghcr.io/idaholab/malcolm/pcap-capture:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -50,7 +50,7 @@ spec: subPath: "upload" initContainers: - name: pcap-capture-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/21-zeek-live.yml b/kubernetes/21-zeek-live.yml index 92984ceb0..f28c3b4ff 100644 --- a/kubernetes/21-zeek-live.yml +++ b/kubernetes/21-zeek-live.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: zeek-live-container - image: 
ghcr.io/idaholab/malcolm/zeek:24.03.1 + image: ghcr.io/idaholab/malcolm/zeek:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -61,7 +61,7 @@ spec: subPath: "zeek/intel" initContainers: - name: zeek-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/22-suricata-live.yml b/kubernetes/22-suricata-live.yml index 2f1b95d57..6c1cf5d3b 100644 --- a/kubernetes/22-suricata-live.yml +++ b/kubernetes/22-suricata-live.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: suricata-live-container - image: ghcr.io/idaholab/malcolm/suricata:24.03.1 + image: ghcr.io/idaholab/malcolm/suricata:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -56,7 +56,7 @@ spec: name: suricata-live-custom-configs-volume initContainers: - name: suricata-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/23-arkime-live.yml b/kubernetes/23-arkime-live.yml index c3f30ec4c..21e0b8c83 100644 --- a/kubernetes/23-arkime-live.yml +++ b/kubernetes/23-arkime-live.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: arkime-live-container - image: ghcr.io/idaholab/malcolm/arkime:24.03.1 + image: ghcr.io/idaholab/malcolm/arkime:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -62,7 +62,7 @@ spec: name: arkime-live-pcap-volume initContainers: - name: arkime-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/24-freq.yml b/kubernetes/24-freq.yml index 465744da9..2e0630b2d 100644 --- a/kubernetes/24-freq.yml +++ b/kubernetes/24-freq.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: freq-container - image: ghcr.io/idaholab/malcolm/freq:24.03.1 + image: ghcr.io/idaholab/malcolm/freq:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/98-nginx-proxy.yml b/kubernetes/98-nginx-proxy.yml index 44ecd11fb..11c2bfca8 100644 --- a/kubernetes/98-nginx-proxy.yml +++ b/kubernetes/98-nginx-proxy.yml @@ -39,7 +39,7 @@ spec: spec: containers: - name: nginx-proxy-container - image: ghcr.io/idaholab/malcolm/nginx-proxy:24.03.1 + image: ghcr.io/idaholab/malcolm/nginx-proxy:24.04.0 imagePullPolicy: Always stdin: false tty: true @@ -99,7 +99,7 @@ spec: subPath: "nginx" initContainers: - name: nginx-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:24.03.1 + image: ghcr.io/idaholab/malcolm/dirinit:24.04.0 imagePullPolicy: Always stdin: false tty: true diff --git a/logstash/pipelines/enrichment/21_netbox.conf b/logstash/pipelines/enrichment/21_netbox.conf index 38bca294c..72b5be7d6 100644 --- a/logstash/pipelines/enrichment/21_netbox.conf +++ b/logstash/pipelines/enrichment/21_netbox.conf @@ -13,7 +13,7 @@ filter { ruby { id => "ruby_determine_netbox_suitability" # @logtypes = {"suricata"=>["alert"], "zeek"=>["conn", "known_hosts", "known_services", "notice", "signatures", "software", "weird"]} - init => "logtypesStr = ENV['LOGSTASH_NETBOX_ENRICHMENT_DATASETS'] || 'suricata.alert,zeek.conn,zeek.known_hosts,zeek.known_services,zeek.notice,zeek.signatures,zeek.software,zeek.weird' ; logtypesArr = logtypesStr.gsub(/\s+/, '').split(','); @logtypes = logtypesArr.group_by { |logtype| logtype.split('.').first }.transform_values { |values| values.map { |v| v.split('.')[1] } }" + init 
=> "logtypesStr = ENV['LOGSTASH_NETBOX_ENRICHMENT_DATASETS'] || 'suricata.alert,zeek.conn,zeek.dhcp,zeek.dns,zeek.known_hosts,zeek.known_services,zeek.ntlm,zeek.notice,zeek.signatures,zeek.software,zeek.weird' ; logtypesArr = logtypesStr.gsub(/\s+/, '').split(','); @logtypes = logtypesArr.group_by { |logtype| logtype.split('.').first }.transform_values { |values| values.map { |v| v.split('.')[1] } }" code => " provider = event.get('[event][provider]').to_s dataset = event.get('[event][dataset]').to_s @@ -28,18 +28,176 @@ filter { } if ([@metadata][do_netbox_enrichment]) { + + # ################################################################################################ + # before we do the actual enrichments, we've got a few log types we can use to map IP addresses + # to hostnames in autopopulation + + # for these ones while we'll create the device entry with an IP address + # and hostname, additional details (such as the manufacturer based + # on MAC address) will need to be updated later (also note the blank + # "target" which means this record will be used to populate the netbox + # database, but nothing will actually be stored in a field of the + # record itself as a result of this filter) + + if ([dns][question][name]) and ([dns][resolved_ip]) { + ruby { + id => "ruby_netbox_enrich_dns_ip_to_host" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[dns][resolved_ip]" + "source_hostname" => "[dns][question][name]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => "NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } + + if ([zeek][ntlm]) { + + if ([zeek][ntlm][host]) and ([source][ip]) { + ruby { + id => "ruby_netbox_enrich_ntlm_host_to_ip" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[source][ip]" + "source_hostname" => "[zeek][ntlm][host]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => "NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } # ([zeek][ntlm][host]) and ([source][ip]) + + if ([destination][ip]) { + if ([zeek][ntlm][server_nb_computer]) { + ruby { + id => "ruby_netbox_enrich_ntlm_server_nb_computer_to_ip" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[destination][ip]" + "source_hostname" => "[zeek][ntlm][server_nb_computer]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => 
"NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } else if ([zeek][ntlm][server_dns_computer]) { + ruby { + id => "ruby_netbox_enrich_ntlm_server_dns_computer_to_ip" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[destination][ip]" + "source_hostname" => "[zeek][ntlm][server_dns_computer]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => "NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } + } # [destination][ip] + } # ntlm + + + if ([zeek][dhcp][assigned_ip]) { + if ([zeek][dhcp][client_fqdn]) { + ruby { + id => "ruby_netbox_enrich_dhcp_client_fqdn_to_ip" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[zeek][dhcp][assigned_addr]" + "source_hostname" => "[zeek][dhcp][client_fqdn]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => "NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } else if ([zeek][dhcp][host_name]) { + ruby { + id => "ruby_netbox_enrich_dhcp_host_name_to_ip" + path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" + script_params => { + "lookup_type" => "ip_device" + "source" => "[zeek][dhcp][assigned_addr]" + "source_hostname" => "[zeek][dhcp][host_name]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" + "lookup_site_env" => "NETBOX_DEFAULT_SITE" + "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "NETBOX_CACHE_SIZE" + "cache_ttl_env" => "NETBOX_CACHE_TTL" + "autopopulate_env" => "NETBOX_AUTO_POPULATE" + "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" + "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" + "default_role_env" => "NETBOX_DEFAULT_ROLE" + } + } + } + } + + # ################################################################################################ + # now we're doing the actual enrichment lookups (i.e., we have a target) + if ([source][ip]) and (([network][direction] == "internal") or ([network][direction] == "outbound")) { ruby { id => "ruby_netbox_enrich_source_ip_segment" path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" script_params => { - "enabled_env" => "NETBOX_ENRICHMENT" - "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" "source" => "[source][ip]" "target" => "[source][segment]" - "auto_prefix_env" => "NETBOX_AUTO_CREATE_PREFIX" "lookup_type" => "ip_prefix" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => 
"NETBOX_ENRICHMENT_DEBUG" + "auto_prefix_env" => "NETBOX_AUTO_CREATE_PREFIX" "lookup_site_env" => "NETBOX_DEFAULT_SITE" "netbox_token_env" => "SUPERUSER_API_TOKEN" "cache_size_env" => "NETBOX_CACHE_SIZE" @@ -50,13 +208,16 @@ filter { id => "ruby_netbox_enrich_source_ip_device" path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" script_params => { - "enabled_env" => "NETBOX_ENRICHMENT" - "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "lookup_type" => "ip_device" "source" => "[source][ip]" "target" => "[source][device]" - "lookup_type" => "ip_device" + "source_oui" => "[source][oui]" + "source_mac" => "[source][mac]" + "source_hostname" => "[source][ip_reverse_dns]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" "lookup_site_env" => "NETBOX_DEFAULT_SITE" - "lookup_service" => "false" "netbox_token_env" => "SUPERUSER_API_TOKEN" "cache_size_env" => "NETBOX_CACHE_SIZE" "cache_ttl_env" => "NETBOX_CACHE_TTL" @@ -66,8 +227,6 @@ filter { "default_role_env" => "NETBOX_DEFAULT_ROLE" "autopopulate_fuzzy_threshold_env" => "NETBOX_DEFAULT_FUZZY_THRESHOLD" "autopopulate_create_manuf_env" => "NETBOX_DEFAULT_AUTOCREATE_MANUFACTURER" - "source_oui" => "[source][oui]" - "source_mac" => "[source][mac]" } } } @@ -77,12 +236,13 @@ filter { id => "ruby_netbox_enrich_destination_ip_segment" path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" script_params => { - "enabled_env" => "NETBOX_ENRICHMENT" - "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "lookup_type" => "ip_prefix" "source" => "[destination][ip]" "target" => "[destination][segment]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" "auto_prefix_env" => "NETBOX_AUTO_CREATE_PREFIX" - "lookup_type" => "ip_prefix" "lookup_site_env" => "NETBOX_DEFAULT_SITE" "netbox_token_env" => "SUPERUSER_API_TOKEN" "cache_size_env" => "NETBOX_CACHE_SIZE" @@ -93,11 +253,15 @@ filter { id => "ruby_netbox_enrich_destination_ip_device" path => "/usr/share/logstash/malcolm-ruby/netbox_enrich.rb" script_params => { - "enabled_env" => "NETBOX_ENRICHMENT" - "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "lookup_type" => "ip_device" "source" => "[destination][ip]" "target" => "[destination][device]" - "lookup_type" => "ip_device" + "source_oui" => "[destination][oui]" + "source_mac" => "[destination][mac]" + "source_hostname" => "[destination][ip_reverse_dns]" + "enabled_env" => "NETBOX_ENRICHMENT" + "verbose_env" => "NETBOX_ENRICHMENT_VERBOSE" + "debug_env" => "NETBOX_ENRICHMENT_DEBUG" "lookup_site_env" => "NETBOX_DEFAULT_SITE" "lookup_service_env" => "NETBOX_ENRICHMENT_LOOKUP_SERVICE" "lookup_service_port_source" => "[destination][port]" @@ -110,8 +274,6 @@ filter { "default_role_env" => "NETBOX_DEFAULT_ROLE" "autopopulate_fuzzy_threshold_env" => "NETBOX_DEFAULT_FUZZY_THRESHOLD" "autopopulate_create_manuf_env" => "NETBOX_DEFAULT_AUTOCREATE_MANUFACTURER" - "source_oui" => "[destination][oui]" - "source_mac" => "[destination][mac]" } } } diff --git a/logstash/pipelines/zeek/12_zeek_mutate.conf b/logstash/pipelines/zeek/12_zeek_mutate.conf index 32388c8ca..f4b8d0d29 100644 --- a/logstash/pipelines/zeek/12_zeek_mutate.conf +++ b/logstash/pipelines/zeek/12_zeek_mutate.conf @@ -16,6 +16,11 @@ filter { if (![event][provider]) { mutate { id => "mutate_add_field_event_provider_zeek" add_field => { "[event][provider]" => "zeek" } } } + if (![host][name]) and ([agent][hostname]) { + mutate { id => 
"mutate_zeek_add_field_host_name_agent_hostname" + add_field => { "[host][name]" => "%{[agent][hostname]}" } } + } + # rename the zeek child array to match the log type mutate { id => "mutate_rename_zeek_log_type" rename => { "[zeek_cols]" => "[zeek][%{[log_source]}]" } } @@ -560,6 +565,25 @@ filter { if ([zeek][files][sha256]) { mutate { id => "mutate_add_field_ecs_files_hash_sha256" add_field => { "[file][hash][sha256]" => "%{[zeek][files][sha256]}" } } } + if ([zeek][files][extracted]) { + ruby { + id => "ruby_zeek_files_extracted_uri_build" + code => " + uri = nil + if (fName = event.get('[zeek][files][extracted]')) then + if (tags = event.get('[tags]')) && tags.include?('_filebeat_zeek_hedgehog_live') then + if (hName = event.get('[host][name]')) then + uri = 'hh-extracted-files/' + hName + '/' + fName + end + else + uri = 'extracted-files/' + fName + end + end + event.set('[zeek][files][extracted_uri]', uri) unless uri.nil? or (uri.length == 0) + " + } + } + } else if ([log_source] == "ftp") { ############################################################################################################################# # ftp.log specific logic diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index d5d38d808..1a7d07684 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -95,6 +95,13 @@ def register( end @verbose = [1, true, '1', 'true', 't', 'on', 'enabled'].include?(_verbose_str.to_s.downcase) + _debug_str = params["debug"] + _debug_env = params["debug_env"] + if _debug_str.nil? && !_debug_env.nil? + _debug_str = ENV[_debug_env] + end + @debug = [1, true, '1', 'true', 't', 'on', 'enabled'].include?(_debug_str.to_s.downcase) + # connection URL for netbox @netbox_url = params.fetch("netbox_url", "http://netbox:8080/netbox/api").delete_suffix("/") @netbox_url_suffix = "/netbox/api" @@ -129,6 +136,7 @@ def register( @source_oui = params["source_oui"] @source_mac = params["source_mac"] @source_segment = params["source_segment"] + @default_status = params.fetch("default_status", "active").to_sym # default manufacturer, role and device type if not specified, either specified directly or read from ENVs @default_manuf = params["default_manuf"] @@ -252,6 +260,17 @@ def register( @nb_headers = { 'Content-Type': 'application/json' }.freeze + @device_tag_autopopulated = { 'slug': 'malcolm-autopopulated' }.freeze + # for ip_device hash lookups, if a device is pulled out that has one of these tags + # it should be *updated* instead of just created. this allows us to create even less-fleshed + # out device entries from things like DNS entries but then give more information (like + # manufacturer) later on when actual traffic is observed. these values should match + # what's in netbox/preload/tags.yml + @device_tag_manufacturer_unknown = { 'slug': 'manufacturer-unknown' }.freeze + @device_tag_hostname_unknown = { 'slug': 'hostname-unknown' }.freeze + + @virtual_machine_device_type_name = "Virtual Machine".freeze + end def filter( @@ -262,126 +281,78 @@ def filter( return [event] end - _key_ip = IPAddr.new(_key) rescue nil - _lookup_service_port = (@lookup_service ? event.get("#{@lookup_service_port_source}") : nil).to_i - _autopopulate_default_manuf = (@default_manuf.nil? || @default_manuf.empty?) ? "Unspecified" : @default_manuf - _autopopulate_default_role = (@default_role.nil? || @default_role.empty?) ? "Unspecified" : @default_role - _autopopulate_default_dtype = (@default_dtype.nil? || @default_dtype.empty?) ? 
"Unspecified" : @default_dtype - _autopopulate_default_site = (@lookup_site.nil? || @lookup_site.empty?) ? "default" : @lookup_site - _autopopulate_hostname = event.get("#{@source_hostname}") - _autopopulate_mac = event.get("#{@source_mac}") - _autopopulate_oui = event.get("#{@source_oui}") - - _result = @cache_hash.getset(@lookup_type){ - LruRedux::TTL::ThreadSafeCache.new(@cache_size, @cache_ttl) - }.getset(_key){ - - _nb = Faraday.new(@netbox_url) do |conn| - conn.request :authorization, 'Token', @netbox_token - conn.request :url_encoded - conn.response :json, :parser_options => { :symbolize_names => true } - end + # _key might be an array of IP addresses, but we're only going to set the first _result into @target. + # this is still useful, though as autopopulation may happen for multiple IPs even if we only + # store the result of the first one found + if !_key.is_a?(Array) then + _newKey = Array.new + _newKey.push(_key) unless _key.nil? + _key = _newKey + end + _result_set = false - _lookup_result = nil - _autopopulate_device = nil - _autopopulate_role = nil - _autopopulate_dtype = nil - _autopopulate_manuf = nil - _autopopulate_site = nil - _prefixes = nil - _devices = nil - - # handle :ip_device first, because if we're doing autopopulate we're also going to use - # some of the logic from :ip_prefix - - if (@lookup_type == :ip_device) - ################################################################################# - # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. - # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each - # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or - # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) - _devices = lookup_devices(_key, @lookup_site, _lookup_service_port, @netbox_url_base, @netbox_url_suffix, _nb) - - if @autopopulate && (_devices.nil? || _devices.empty?) && _key_ip&.private? - # no results found, autopopulate enabled, private-space IP address... - # let's create an entry for this device - _autopopulate_device, - _autopopulate_role, - _autopopulate_dtype, - _autopopulate_oui, - _autopopulate_manuf, - _autopopulate_site = autopopulate_devices(_key, - _autopopulate_mac, - _autopopulate_oui, - _autopopulate_default_site, - _autopopulate_default_role, - _autopopulate_default_dtype, - _autopopulate_default_manuf, - _autopopulate_hostname, - _nb) - if !_autopopulate_device.nil? - # puts('5. %{key}: %{found}' % { key: autopopulate_oui, found: JSON.generate(_autopopulate_manuf) }) - # we created a device, so send it back out as the result for the event as well - _devices = Array.new unless _devices.is_a?(Array) - _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), - :id => _autopopulate_device&.fetch(:id, nil), - :url => _autopopulate_device&.fetch(:url, nil), - :site => _autopopulate_site&.fetch(:name, nil), - :role => _autopopulate_role&.fetch(:name, nil), - :device_type => _autopopulate_dtype&.fetch(:name, nil), - :manufacturer => _autopopulate_manuf&.fetch(:name, nil), - :details => @verbose ? _autopopulate_device : nil } - end # _autopopulate_device was not nil (i.e., we autocreated a device) - end # _autopopulate turned on and no results found - - _devices = collect_values(crush(_devices)) - _devices.fetch(:service, [])&.flatten!&.uniq! 
- _lookup_result = _devices - end # @lookup_type == :ip_device - - # this || is because we are going to need to do the prefix lookup if we're autopopulating - # as well as if we're specifically requested to do that enrichment - - if (@lookup_type == :ip_prefix) || !_autopopulate_device.nil? - ################################################################################# - # retrieve the list of IP address prefixes containing the search key - _prefixes = lookup_prefixes(_key, @lookup_site, _nb) - - # TODO: ipv6? - if (_prefixes.nil? || _prefixes.empty?) && !_key_ip&.ipv6? && _key_ip&.private? && @autopopulate_create_prefix - # we didn't find a prefix containing this private-space IPv4 address and auto-create is true - _prefix_info = autopopulate_prefixes(_key_ip, _autopopulate_default_site, _nb) - _prefixes = Array.new unless _prefixes.is_a?(Array) - _prefixes << _prefix_info - end # if auto-create prefix - - _prefixes = collect_values(crush(_prefixes)) - _lookup_result = _prefixes unless (@lookup_type != :ip_prefix) - end # @lookup_type == :ip_prefix - - if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? - # device has been created, we need to create an interface for it - _autopopulate_device = create_device_interface(_key, - _autopopulate_device, - _autopopulate_manuf, - _autopopulate_mac, - _nb) - end # check if device was created and has ID - - # yield return value for cache_hash getset - _lookup_result - } - - if !_result.nil? && _result.has_key?(:url) && !_result[:url]&.empty? - _result[:url].map! { |u| u.delete_prefix(@netbox_url_base).gsub('/api/', '/') } - if (@lookup_type == :ip_device) && - (!_result.has_key?(:device_type) || _result[:device_type]&.empty?) && - _result[:url].any? { |u| u.include? "virtual-machines" } - then - _result[:device_type] = [ "Virtual Machine" ] + _key.each do |ip_key| + + _lookup_hash = @cache_hash.getset(@lookup_type){ LruRedux::TTL::ThreadSafeCache.new(@cache_size, @cache_ttl) } + _result = _lookup_hash.getset(ip_key){ netbox_lookup(:event=>event, :ip_key=>ip_key) }.dup + + if !_result.nil? + + # we've done a lookup and got (or autopopulated) our answer, however, if this is a device lookup and + # either the hostname-unknown or manufacturer-unknown is set, we should see if we can update it + if (_tags = _result.fetch(:tags, nil)) && + @autopopulate && + (@lookup_type == :ip_device) && + _tags.is_a?(Array) && + _tags.flatten! && + _tags.all? { |item| item.is_a?(Hash) } && + _tags.any? {|tag| tag[:slug] == @device_tag_autopopulated[:slug]} + then + _updated_result = nil + _autopopulate_hostname = event.get("#{@source_hostname}").to_s + _autopopulate_mac = event.get("#{@source_mac}").to_s.downcase + _autopopulate_oui = event.get("#{@source_oui}").to_s + if ((_tags.any? {|tag| tag[:slug] == @device_tag_hostname_unknown[:slug]} && + (!_autopopulate_hostname.empty? && !_autopopulate_hostname.end_with?('.in-addr.arpa'))) || + (_tags.any? {|tag| tag[:slug] == @device_tag_manufacturer_unknown[:slug]} && + ((!_autopopulate_mac.empty? && (_autopopulate_mac != 'ff:ff:ff:ff:ff:ff') && (_autopopulate_mac != '00:00:00:00:00:00')) || + !_autopopulate_oui.empty?))) + then + # the hostname-unknown tag is set, but we appear to have a hostname + # from the event. we need to update the record in netbox (set the new hostname + # from this value and remove the tag) and in the result + # OR + # the manufacturer-unknown tag is set, but we appear to have an OUI or MAC address + # from the event. 
we need to update the record in netbox (determine the manufacturer + # from this value and remove the tag) and in the result + _updated_result = netbox_lookup(:event=>event, :ip_key=>ip_key, :previous_result=>_result) + puts('filter tried to patch %{name} for "%{tags}" ("%{host}", "%{mac}", "%{oui}"): %{result}' % { + name: ip_key, + tags: _tags.map{ |hash| hash[:slug] }.join('|'), + host: _autopopulate_hostname, + mac: _autopopulate_mac, + oui: _autopopulate_oui, + result: JSON.generate(_updated_result) }) if @debug + end + _lookup_hash[ip_key] = (_result = _updated_result) if _updated_result + end + _result.delete(:tags) + + if _result.has_key?(:url) && !_result[:url]&.empty? + _result[:url].map! { |u| u.delete_prefix(@netbox_url_base).gsub('/api/', '/') } + if (@lookup_type == :ip_device) && + (!_result.has_key?(:device_type) || _result[:device_type]&.empty?) && + _result[:url].any? { |u| u.include? "virtual-machines" } + then + _result[:device_type] = [ @virtual_machine_device_type_name ] + end + end end - end - event.set("#{@target}", _result) unless _result.nil? || _result.empty? + unless _result_set || _result.nil? || _result.empty? || @target.nil? || @target.empty? + event.set("#{@target}", _result) + _result_set = true + end + end # _key.each do |ip_key| [event] end @@ -392,6 +363,34 @@ def mac_string_to_integer( string.tr('.:-','').to_i(16) end +def mac_to_oui_lookup( + mac +) + _oui = nil + + case mac + when String + if @macregex.match?(mac) + _macint = mac_string_to_integer(mac) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + _oui = _vendor[2] unless _vendor.nil? + end # mac matches @macregex + when Array + mac.each do |_addr| + if @macregex.match?(_addr) + _macint = mac_string_to_integer(_addr) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + if !_vendor.nil? + _oui = _vendor[2] + break + end # !_vendor.nil? + end # _addr matches @macregex + end # mac.each do + end # case statement mac String vs. Array + + _oui +end + def psych_load_yaml( filename ) @@ -453,87 +452,213 @@ def lookup_or_create_site( site_name, nb ) - @site_hash.getset(site_name) { - begin - _site = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => site_name } - if (_sites_response = nb.get('dcim/sites/', _query).body) && - _sites_response.is_a?(Hash) && - (_tmp_sites = _sites_response.fetch(:results, [])) && - (_tmp_sites.length() > 0) - then - _site = _tmp_sites.first - end - - if _site.nil? - # the device site is not found, create it - _site_data = { :name => site_name, - :slug => site_name.to_url, - :status => "active" } - if (_site_create_response = nb.post('dcim/sites/', _site_data.to_json, @nb_headers).body) && - _site_create_response.is_a?(Hash) && - _site_create_response.has_key?(:id) + if !site_name.to_s.empty? + @site_hash.getset(site_name) { + begin + _site = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => site_name } + if (_sites_response = nb.get('dcim/sites/', _query).body) && + _sites_response.is_a?(Hash) && + (_tmp_sites = _sites_response.fetch(:results, [])) && + (_tmp_sites.length() > 0) then - _site = _site_create_response + _site = _tmp_sites.first end - end - rescue Faraday::Error - # give up aka do nothing - end - _site - } + if _site.nil? 
+ # the device site is not found, create it + _site_data = { :name => site_name, + :slug => site_name.to_url, + :status => "active" } + if (_site_create_response = nb.post('dcim/sites/', _site_data.to_json, @nb_headers).body) && + _site_create_response.is_a?(Hash) && + _site_create_response.has_key?(:id) + then + _site = _site_create_response + elsif @debug + puts('lookup_or_create_site (%{name}): _site_create_response: %{result}' % { name: site_name, result: JSON.generate(_site_create_response) }) + end + end + + rescue Faraday::Error => e + # give up aka do nothing + puts "lookup_or_create_site (#{site_name}): #{e.message}" if @debug + end + _site + }.dup + else + nil + end end def lookup_manuf( oui, nb ) - @manuf_hash.getset(oui) { - _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) - _oui_cleaned = clean_manuf_string(oui.to_s) - _manufs = Array.new - # fetch the manufacturers to do the comparison. this is a lot of work - # and not terribly fast but once the hash it populated it shouldn't happen too often - _query = { :offset => 0, - :limit => @page_size } - begin - while true do - if (_manufs_response = nb.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) - then - _tmp_manufs = _manufs_response.fetch(:results, []) - _tmp_manufs.each do |_manuf| - _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) - _tmp_distance = _fuzzy_matcher.getDistance(clean_manuf_string(_tmp_name.to_s), _oui_cleaned) - if (_tmp_distance >= @autopopulate_fuzzy_threshold) then - _manufs << { :name => _tmp_name, - :id => _manuf.fetch(:id, nil), - :url => _manuf.fetch(:url, nil), - :match => _tmp_distance, - :vm => false - } + if !oui.to_s.empty? + @manuf_hash.getset(oui) { + _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) + _oui_cleaned = clean_manuf_string(oui.to_s) + _manufs = Array.new + # fetch the manufacturers to do the comparison. this is a lot of work + # and not terribly fast but once the hash it populated it shouldn't happen too often + _query = { :offset => 0, + :limit => @page_size } + begin + while true do + if (_manufs_response = nb.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) + then + _tmp_manufs = _manufs_response.fetch(:results, []) + _tmp_manufs.each do |_manuf| + _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) + _tmp_distance = _fuzzy_matcher.getDistance(clean_manuf_string(_tmp_name.to_s), _oui_cleaned) + if (_tmp_distance >= @autopopulate_fuzzy_threshold) then + _manufs << { :name => _tmp_name, + :id => _manuf.fetch(:id, nil), + :url => _manuf.fetch(:url, nil), + :match => _tmp_distance, + :vm => false } + end end + _query[:offset] += _tmp_manufs.length() + break unless (_tmp_manufs.length() >= @page_size) + else + break end - _query[:offset] += _tmp_manufs.length() - break unless (_tmp_manufs.length() >= @page_size) - else - break end + rescue Faraday::Error => e + # give up aka do nothing + puts "lookup_manuf (#{oui}): #{e.message}" if @debug end - rescue Faraday::Error - # give up aka do nothing - end - # return the manuf with the highest match - # puts('0. %{key}: %{matches}' % { key: _autopopulate_oui_cleaned, matches: JSON.generate(_manufs) })-] - !_manufs&.empty? ? _manufs.max_by{|k| k[:match] } : nil - } + # return the manuf with the highest match + # puts('0. %{key}: %{matches}' % { key: _autopopulate_oui_cleaned, matches: JSON.generate(_manufs) })-] + !_manufs&.empty? ? 
_manufs.max_by{|k| k[:match] } : nil + }.dup + else + nil + end end +def lookup_or_create_manuf_and_dtype( + oui, + default_manuf, + default_dtype, + nb +) + _oui = oui + _dtype = nil + _manuf = nil + + begin + # match/look up manufacturer based on OUI + if !_oui.nil? && !_oui.empty? + _oui = _oui.first() unless !_oui.is_a?(Array) + # does it look like a VM or a regular device? + if @vm_namesarray.include?(_oui.downcase) + # looks like this is probably a virtual machine + _manuf = { :name => _oui, + :match => 1.0, + :vm => true, + :id => nil } + else + # looks like this is not a virtual machine (or we can't tell) so assume it's a regular device + _manuf = lookup_manuf(_oui, nb) + end # virtual machine vs. regular device + end # oui specified + + # puts('1. %{key}: %{found}' % { key: oui, found: JSON.generate(_manuf) }) + if !_manuf.is_a?(Hash) + # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer + _manuf = { :name => (@autopopulate_create_manuf && !_oui.nil? && !_oui.empty?) ? _oui : default_manuf, + :match => 0.0, + :vm => false, + :id => nil} + end + # puts('2. %{key}: %{found}' % { key: _oui, found: JSON.generate(_manuf) }) + + if !_manuf[:vm] + + if !_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer was default (not found) so look it up first + _query = { :offset => 0, + :limit => 1, + :name => _manuf[:name] } + if (_manufs_response = nb.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) && + (_tmp_manufs = _manufs_response.fetch(:results, [])) && + (_tmp_manufs.length() > 0) + then + _manuf[:id] = _tmp_manufs.first.fetch(:id, nil) + _manuf[:match] = 1.0 + end + end + # puts('3. %{key}: %{found}' % { key: _oui, found: JSON.generate(_manuf) }) + + if !_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer is still not found, create it + _manuf_data = { :name => _manuf[:name], + :tags => [ @device_tag_autopopulated ], + :slug => _manuf[:name].to_url } + if (_manuf_create_response = nb.post('dcim/manufacturers/', _manuf_data.to_json, @nb_headers).body) && + _manuf_create_response.is_a?(Hash) + then + _manuf[:id] = _manuf_create_response.fetch(:id, nil) + _manuf[:match] = 1.0 + elsif @debug + puts('lookup_or_create_manuf_and_dtype (%{name}): _manuf_create_response: %{result}' % { name: _manuf[:name], result: JSON.generate(_manuf_create_response) }) + end + # puts('4. %{key}: %{created}' % { key: _manuf, created: JSON.generate(_manuf_create_response) }) + end + + # at this point we *must* have the manufacturer ID + if _manuf.fetch(:id, nil)&.nonzero? + + # make sure the desired device type also exists, look it up first + _query = { :offset => 0, + :limit => 1, + :manufacturer_id => _manuf[:id], + :model => default_dtype } + if (_dtypes_response = nb.get('dcim/device-types/', _query).body) && + _dtypes_response.is_a?(Hash) && + (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && + (_tmp_dtypes.length() > 0) + then + _dtype = _tmp_dtypes.first + end + + if _dtype.nil? 
+ # the device type is not found, create it + _dtype_data = { :manufacturer => _manuf[:id], + :model => default_dtype, + :tags => [ @device_tag_autopopulated ], + :slug => default_dtype.to_url } + if (_dtype_create_response = nb.post('dcim/device-types/', _dtype_data.to_json, @nb_headers).body) && + _dtype_create_response.is_a?(Hash) && + _dtype_create_response.has_key?(:id) + then + _dtype = _dtype_create_response + elsif @debug + puts('lookup_or_create_manuf_and_dtype (%{name}: _dtype_create_response: %{result}' % { name: default_dtype, result: JSON.generate(_dtype_create_response) }) + end + end + + end # _manuf :id check + end # _manuf is not a VM + + rescue Faraday::Error => e + # give up aka do nothing + puts "lookup_or_create_manuf_and_dtype (#{oui}): #{e.message}" if @debug + end + + return _dtype, _manuf + +end # def lookup_or_create_manuf_and_dtype + def lookup_prefixes( ip_str, lookup_site, @@ -559,11 +684,12 @@ def lookup_prefixes( _prefixName = p.fetch(:display, p.fetch(:prefix, nil)) end prefixes << { :name => _prefixName, - :id => p.fetch(:id, nil), - :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), - :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), - :url => p.fetch(:url, p.fetch(:url, nil)), - :details => @verbose ? p : nil } + :id => p.fetch(:id, nil), + :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), + :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), + :url => p.fetch(:url, nil), + :tags => p.fetch(:tags, nil), + :details => @verbose ? p : nil } end _query[:offset] += _tmp_prefixes.length() break unless (_tmp_prefixes.length() >= @page_size) @@ -571,8 +697,9 @@ def lookup_prefixes( break end end - rescue Faraday::Error + rescue Faraday::Error => e # give up aka do nothing + puts "lookup_prefixes (#{ip_str}): #{e.message}" if @debug end prefixes @@ -582,40 +709,47 @@ def lookup_or_create_role( role_name, nb ) - @role_hash.getset(role_name) { - begin - _role = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => role_name } - if (_roles_response = nb.get('dcim/device-roles/', _query).body) && - _roles_response.is_a?(Hash) && - (_tmp_roles = _roles_response.fetch(:results, [])) && - (_tmp_roles.length() > 0) - then - _role = _tmp_roles.first - end - - if _role.nil? - # the role is not found, create it - _role_data = { :name => role_name, - :slug => role_name.to_url, - :color => "d3d3d3" } - if (_role_create_response = nb.post('dcim/device-roles/', _role_data.to_json, @nb_headers).body) && - _role_create_response.is_a?(Hash) && - _role_create_response.has_key?(:id) + if !role_name.to_s.empty? + @role_hash.getset(role_name) { + begin + _role = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => role_name } + if (_roles_response = nb.get('dcim/device-roles/', _query).body) && + _roles_response.is_a?(Hash) && + (_tmp_roles = _roles_response.fetch(:results, [])) && + (_tmp_roles.length() > 0) then - _role = _role_create_response + _role = _tmp_roles.first end - end - rescue Faraday::Error - # give up aka do nothing - end - _role - } + if _role.nil? 
+ # the role is not found, create it + _role_data = { :name => role_name, + :slug => role_name.to_url, + :color => "d3d3d3" } + if (_role_create_response = nb.post('dcim/device-roles/', _role_data.to_json, @nb_headers).body) && + _role_create_response.is_a?(Hash) && + _role_create_response.has_key?(:id) + then + _role = _role_create_response + elsif @debug + puts('lookup_or_create_role (%{name}): _role_create_response: %{result}' % { name: role_name, result: JSON.generate(_role_create_response) }) + end + end + + rescue Faraday::Error => e + # give up aka do nothing + puts "lookup_or_create_role (#{role_name}): #{e.message}" if @debug + end + _role + }.dup + else + nil + end end def lookup_devices( @@ -672,6 +806,7 @@ def lookup_devices( _devices << { :name => _device.fetch(:name, _device.fetch(:display, nil)), :id => _device_id, :url => _device.fetch(:url, nil), + :tags => _device.fetch(:tags, nil), :service => _device.fetch(:service, []).map {|s| s.fetch(:name, s.fetch(:display, nil)) }, :site => _device_site, :role => ((_role = _device.fetch(:role, nil)) && _role&.has_key?(:name)) ? _role[:name] : _role&.fetch(:display, nil), @@ -688,8 +823,9 @@ def lookup_devices( break end end # while true - rescue Faraday::Error + rescue Faraday::Error => e # give up aka do nothing + puts "lookup_devices (#{ip_str}, #{lookup_site}): #{e.message}" if @debug end _devices end @@ -703,73 +839,32 @@ def autopopulate_devices( autopopulate_default_dtype, autopopulate_default_manuf, autopopulate_hostname, + autopopulate_default_status, nb ) _autopopulate_device = nil _autopopulate_role = nil - _autopopulate_dtype = nil _autopopulate_oui = autopopulate_oui - _autopopulate_manuf = nil _autopopulate_site = nil + _autopopulate_tags = [ @device_tag_autopopulated ] + _autopopulate_tags << @device_tag_hostname_unknown if autopopulate_hostname.to_s.empty? # if MAC is set but OUI is not, do a quick lookup if (!autopopulate_mac.nil? && !autopopulate_mac.empty?) && (_autopopulate_oui.nil? || _autopopulate_oui.empty?) then - case autopopulate_mac - when String - if @macregex.match?(autopopulate_mac) - _macint = mac_string_to_integer(autopopulate_mac) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - _autopopulate_oui = _vendor[2] unless _vendor.nil? - end # autopopulate_mac matches @macregex - when Array - autopopulate_mac.each do |_addr| - if @macregex.match?(_addr) - _macint = mac_string_to_integer(_addr) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - if !_vendor.nil? - _autopopulate_oui = _vendor[2] - break - end # !_vendor.nil? - end # _addr matches @macregex - end # autopopulate_mac.each do - end # case statement autopopulate_mac String vs. Array - end # MAC is populated but OUI is not - - # match/look up manufacturer based on OUI - if !_autopopulate_oui.nil? && !_autopopulate_oui.empty? - - _autopopulate_oui = _autopopulate_oui.first() unless !_autopopulate_oui.is_a?(Array) - - # does it look like a VM or a regular device? - if @vm_namesarray.include?(_autopopulate_oui.downcase) - # looks like this is probably a virtual machine - _autopopulate_manuf = { :name => _autopopulate_oui, - :match => 1.0, - :vm => true, - :id => nil } + _autopopulate_oui = mac_to_oui_lookup(autopopulate_mac) + end - else - # looks like this is not a virtual machine (or we can't tell) so assume its' a regular device - _autopopulate_manuf = lookup_manuf(_autopopulate_oui, nb) - end # virtual machine vs. 
regular device - end # _autopopulate_oui specified - - # puts('1. %{key}: %{found}' % { key: _autopopulate_oui, found: JSON.generate(_autopopulate_manuf) }) - if !_autopopulate_manuf.is_a?(Hash) - # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer - _autopopulate_manuf = { :name => (@autopopulate_create_manuf && !_autopopulate_oui.nil? && !_autopopulate_oui.empty?) ? _autopopulate_oui : autopopulate_default_manuf, - :match => 0.0, - :vm => false, - :id => nil} - end - # puts('2. %{key}: %{found}' % { key: _autopopulate_oui, found: JSON.generate(_autopopulate_manuf) }) - - # make sure the site and role exists + # make sure the site, role, manufacturer and device type exist _autopopulate_site = lookup_or_create_site(autopopulate_default_site_name, nb) _autopopulate_role = lookup_or_create_role(autopopulate_default_role_name, nb) + _autopopulate_dtype, + _autopopulate_manuf = lookup_or_create_manuf_and_dtype(_autopopulate_oui, + autopopulate_default_manuf, + autopopulate_default_dtype, + nb) # we should have found or created the autopopulate role and site begin @@ -777,117 +872,69 @@ def autopopulate_devices( _autopopulate_role&.fetch(:id, nil)&.nonzero? then - if _autopopulate_manuf[:vm] + if _autopopulate_manuf&.fetch(:vm, false) # a virtual machine - _device_name = autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{ip_str}" : "#{autopopulate_hostname} @ #{ip_str}" + _device_name = autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{ip_str}" : autopopulate_hostname _device_data = { :name => _device_name, :site => _autopopulate_site[:id], - :status => "staged" } + :tags => _autopopulate_tags, + :status => autopopulate_default_status } if (_device_create_response = nb.post('virtualization/virtual-machines/', _device_data.to_json, @nb_headers).body) && _device_create_response.is_a?(Hash) && _device_create_response.has_key?(:id) then _autopopulate_device = _device_create_response + elsif @debug + puts('autopopulate_devices (VM: %{name}): _device_create_response: %{result}' % { name: _device_name, result: JSON.generate(_device_create_response) }) end else - # a regular non-vm device + # a regular non-vm device: at this point we *must* have the manufacturer ID and device type ID + if _autopopulate_manuf&.fetch(:id, nil)&.nonzero? && + _autopopulate_dtype&.fetch(:id, nil)&.nonzero? + then - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? - # the manufacturer was default (not found) so look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_manuf[:name] } - if (_manufs_response = nb.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) && - (_tmp_manufs = _manufs_response.fetch(:results, [])) && - (_tmp_manufs.length() > 0) - then - _autopopulate_manuf[:id] = _tmp_manufs.first.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 - end - end - # puts('3. %{key}: %{found}' % { key: _autopopulate_oui, found: JSON.generate(_autopopulate_manuf) }) - - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? 
- # the manufacturer is still not found, create it - _manuf_data = { :name => _autopopulate_manuf[:name], - :slug => _autopopulate_manuf[:name].to_url } - if (_manuf_create_response = nb.post('dcim/manufacturers/', _manuf_data.to_json, @nb_headers).body) && - _manuf_create_response.is_a?(Hash) + # never figured out the manufacturer (actually, we were never even given the fields to do so), so tag it as such + if ((_autopopulate_manuf.fetch(:name, autopopulate_default_manuf) == autopopulate_default_manuf) && + autopopulate_mac.to_s.empty? && _autopopulate_oui.to_s.empty?) then - _autopopulate_manuf[:id] = _manuf_create_response.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 + _autopopulate_tags << @device_tag_manufacturer_unknown end - # puts('4. %{key}: %{created}' % { key: _autopopulate_manuf, created: JSON.generate(_manuf_create_response) }) - end - # at this point we *must* have the manufacturer ID - if _autopopulate_manuf.fetch(:id, nil)&.nonzero? - - # make sure the desired device type also exists, look it up first - _query = { :offset => 0, - :limit => 1, - :manufacturer_id => _autopopulate_manuf[:id], - :model => autopopulate_default_dtype } - if (_dtypes_response = nb.get('dcim/device-types/', _query).body) && - _dtypes_response.is_a?(Hash) && - (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && - (_tmp_dtypes.length() > 0) + # create the device + _device_name = autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{ip_str}" : autopopulate_hostname + _device_data = { :name => _device_name, + :device_type => _autopopulate_dtype[:id], + :role => _autopopulate_role[:id], + :site => _autopopulate_site[:id], + :tags => _autopopulate_tags, + :status => autopopulate_default_status } + if (_device_create_response = nb.post('dcim/devices/', _device_data.to_json, @nb_headers).body) && + _device_create_response.is_a?(Hash) && + _device_create_response.has_key?(:id) then - _autopopulate_dtype = _tmp_dtypes.first - end - - if _autopopulate_dtype.nil? - # the device type is not found, create it - _dtype_data = { :manufacturer => _autopopulate_manuf[:id], - :model => autopopulate_default_dtype, - :slug => autopopulate_default_dtype.to_url } - if (_dtype_create_response = nb.post('dcim/device-types/', _dtype_data.to_json, @nb_headers).body) && - _dtype_create_response.is_a?(Hash) && - _dtype_create_response.has_key?(:id) - then - _autopopulate_dtype = _dtype_create_response - end + _autopopulate_device = _device_create_response + elsif @debug + puts('autopopulate_devices (%{name}): _device_create_response: %{result}' % { name: _device_name, result: JSON.generate(_device_create_response) }) end - # # now we must also have the device type ID - if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? - - # create the device - _device_name = autopopulate_hostname.to_s.empty? ? 
"#{_autopopulate_manuf[:name]} @ #{ip_str}" : "#{autopopulate_hostname} @ #{ip_str}" - _device_data = { :name => _device_name, - :device_type => _autopopulate_dtype[:id], - :role => _autopopulate_role[:id], - :site => _autopopulate_site[:id], - :status => "staged" } - if (_device_create_response = nb.post('dcim/devices/', _device_data.to_json, @nb_headers).body) && - _device_create_response.is_a?(Hash) && - _device_create_response.has_key?(:id) - then - _autopopulate_device = _device_create_response - end - - else - # didn't figure out the device type ID, make sure we're not setting something half-populated - _autopopulate_dtype = nil - end # _autopopulate_dtype[:id] is valid - else - # didn't figure out the manufacturer ID, make sure we're not setting something half-populated + # didn't figure out the manufacturer ID and/or device type ID, make sure we're not setting something half-populated _autopopulate_manuf = nil - end # _autopopulate_manuf[:id] is valid + _autopopulate_dtype = nil + end # _autopopulate_manuf[:id] is valid and _autopopulate_dtype[:id] is valid end # virtual machine vs. regular device else - # didn't figure out the IDs, make sure we're not setting something half-populated + # didn't figure out the site and/or role IDs, make sure we're not setting something half-populated _autopopulate_site = nil _autopopulate_role = nil end # site and role are valid - rescue Faraday::Error + rescue Faraday::Error => e # give up aka do nothing + puts "autopopulate_devices (#{ip_str}): #{e.message}" if @debug end return _autopopulate_device, @@ -901,8 +948,11 @@ def autopopulate_devices( def autopopulate_prefixes( ip_obj, autopopulate_default_site, + autopopulate_default_status, nb ) + _autopopulate_tags = [ @device_tag_autopopulated ] + _prefix_data = nil # TODO: IPv6? _private_ip_subnet = @private_ip_subnets.find { |subnet| subnet.include?(ip_obj) } @@ -915,8 +965,9 @@ def autopopulate_prefixes( _autopopulate_site = lookup_or_create_site(autopopulate_default_site, nb) _prefix_post = { :prefix => _new_prefix_name, :description => _new_prefix_name, + :tags => _autopopulate_tags, :site => _autopopulate_site&.fetch(:id, nil), - :status => "active" } + :status => autopopulate_default_status } begin _new_prefix_create_response = nb.post('ipam/prefixes/', _prefix_post.to_json, @nb_headers).body if _new_prefix_create_response && @@ -927,11 +978,15 @@ def autopopulate_prefixes( :id => _new_prefix_create_response.fetch(:id, nil), :site => ((_site = _new_prefix_create_response.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), :tenant => ((_tenant = _new_prefix_create_response.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), - :url => _new_prefix_create_response.fetch(:url, _new_prefix_create_response.fetch(:url, nil)), + :url => _new_prefix_create_response.fetch(:url, nil), + :tags => _new_prefix_create_response.fetch(:tags, nil), :details => @verbose ? 
_new_prefix_create_response : nil } + elsif @debug + puts('autopopulate_prefixes: _new_prefix_create_response: %{result}' % { result: JSON.generate(_new_prefix_create_response) }) end - rescue Faraday::Error + rescue Faraday::Error => e # give up aka do nothing + puts "autopopulate_prefixes (#{ip_obj.to_s}): #{e.message}" if @debug end end _prefix_data @@ -961,6 +1016,8 @@ def create_device_interface( _interface_create_reponse.has_key?(:id) then _autopopulate_interface = _interface_create_reponse + elsif @debug + puts('create_device_interface (%{name}): _interface_create_reponse: %{result}' % { name: ip_str, result: JSON.generate(_interface_create_reponse) }) end if !_autopopulate_interface.nil? && _autopopulate_interface.fetch(:id, nil)&.nonzero? @@ -978,6 +1035,8 @@ def create_device_interface( _ip_create_reponse.has_key?(:id) then _autopopulate_ip = _ip_create_reponse + elsif @debug + puts('create_device_interface (%{name}): _ip_create_reponse: %{result}' % { name: _interface_address, result: JSON.generate(_ip_create_reponse) }) end end # check if interface was created and has ID @@ -988,13 +1047,270 @@ def create_device_interface( _ip_primary_reponse.is_a?(Hash) && _ip_primary_reponse.has_key?(:id) then - _autopopulate_device = _ip_create_reponse + _autopopulate_device = _ip_primary_reponse + elsif @debug + puts('create_device_interface (%{name}): _ip_primary_reponse: %{result}' % { name: _interface_address, result: JSON.generate(_ip_primary_reponse) }) end end # check if the IP address was created and has an ID _autopopulate_device end +def netbox_lookup( + event:, + ip_key:, + previous_result: nil +) + _lookup_result = nil + + _key_ip = IPAddr.new(ip_key) rescue nil + if !_key_ip.nil? && _key_ip&.private? && (@autopopulate || (!@target.nil? && !@target.empty?)) + + _nb = Faraday.new(@netbox_url) do |conn| + conn.request :authorization, 'Token', @netbox_token + conn.request :url_encoded + conn.response :json, :parser_options => { :symbolize_names => true } + end + + _lookup_service_port = (@lookup_service ? event.get("#{@lookup_service_port_source}") : nil).to_i + _autopopulate_default_manuf = (@default_manuf.nil? || @default_manuf.empty?) ? "Unspecified" : @default_manuf + _autopopulate_default_role = (@default_role.nil? || @default_role.empty?) ? "Unspecified" : @default_role + _autopopulate_default_dtype = (@default_dtype.nil? || @default_dtype.empty?) ? "Unspecified" : @default_dtype + _autopopulate_default_site = (@lookup_site.nil? || @lookup_site.empty?) ? "default" : @lookup_site + _autopopulate_hostname = event.get("#{@source_hostname}") + _autopopulate_hostname = nil if _autopopulate_hostname.to_s.end_with?('.in-addr.arpa') + _autopopulate_mac = event.get("#{@source_mac}") + _autopopulate_oui = event.get("#{@source_oui}") + + _autopopulate_device = nil + _autopopulate_role = nil + _autopopulate_dtype = nil + _autopopulate_manuf = nil + _autopopulate_site = nil + _prefixes = nil + _devices = nil + + # handle :ip_device first, because if we're doing autopopulate we're also going to use + # some of the logic from :ip_prefix + + if (@lookup_type == :ip_device) + + if (previous_result.nil? || previous_result.empty?) + ################################################################################# + # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. 
+ # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each + # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or + # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) + _devices = lookup_devices(ip_key, @lookup_site, _lookup_service_port, @netbox_url_base, @netbox_url_suffix, _nb) + + if @autopopulate && (_devices.nil? || _devices.empty?) + # no results found, autopopulate enabled, private-space IP address... + # let's create an entry for this device + _autopopulate_device, + _autopopulate_role, + _autopopulate_dtype, + _autopopulate_oui, + _autopopulate_manuf, + _autopopulate_site = autopopulate_devices(ip_key, + _autopopulate_mac, + _autopopulate_oui, + _autopopulate_default_site, + _autopopulate_default_role, + _autopopulate_default_dtype, + _autopopulate_default_manuf, + _autopopulate_hostname, + @default_status, + _nb) + if !_autopopulate_device.nil? + # puts('5. %{key}: %{found}' % { key: autopopulate_oui, found: JSON.generate(_autopopulate_manuf) }) + # we created a device, so send it back out as the result for the event as well + _devices = Array.new unless _devices.is_a?(Array) + _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), + :id => _autopopulate_device&.fetch(:id, nil), + :url => _autopopulate_device&.fetch(:url, nil), + :tags => _autopopulate_device&.fetch(:tags, nil), + :site => _autopopulate_site&.fetch(:name, nil), + :role => _autopopulate_role&.fetch(:name, nil), + :device_type => _autopopulate_dtype&.fetch(:name, nil), + :manufacturer => _autopopulate_manuf&.fetch(:name, nil), + :details => @verbose ? _autopopulate_device : nil } + end # _autopopulate_device was not nil (i.e., we autocreated a device) + end # _autopopulate turned on and no results found + + elsif @autopopulate + + ################################################################################# + # update with new information on an existing device (i.e., from a previous call to netbox_lookup) + _patched_device_data = Hash.new + + # get existing tags to update them to remove "unkown-..." values if needed + _tags = previous_result.fetch(:tags, nil)&.flatten&.map{ |hash| { slug: hash[:slug] } }&.uniq + + # API endpoints are different for VM vs real device + _was_vm = (previous_result.fetch(:device_type, nil)&.flatten&.any? {|dt| dt == @virtual_machine_device_type_name} || + (previous_result.has_key?(:url) && !previous_result[:url]&.empty? && previous_result[:url].any? { |u| u.include? "virtual-machines" })) + + # get previous device ID (should only be dealing with a single device) + _previous_device_id = previous_result.fetch(:id, nil)&.flatten&.uniq + + # puts('netbox_lookup maybe patching %{name} (%{id}, VM old: %{oldvm}) for "%{tags}" ("%{host}", "%{mac}", "%{oui}")' % { + # name: ip_key, + # id: _previous_device_id, + # oldvm: _was_vm, + # tags: _tags.is_a?(Array) ? _tags.map{ |hash| hash[:slug] }.join('|') : '', + # host: _autopopulate_hostname.to_s, + # mac: _autopopulate_mac.to_s, + # oui: _autopopulate_oui.to_s }) if @debug + + if _previous_device_id.is_a?(Array) && + (_previous_device_id.length() == 1) && + (_previous_device_id = _previous_device_id.first) + then + _previous_device_site = [previous_result.fetch(:site, nil)].flatten.uniq.first + + if !_autopopulate_hostname.to_s.empty? && + _tags&.any? 
{|tag| tag[:slug] == @device_tag_hostname_unknown[:slug]} + then + # a hostname field was specified where before we had none, which means we're going to overwrite + # the device name previously created which was probably something like "Dell @ 192.168.10.100" + # and also remove the "unknown hostname" tag + _patched_device_data[:name] = _autopopulate_hostname + _tags = _tags.filter{|tag| tag[:slug] != @device_tag_hostname_unknown[:slug]} + end + + if ((!_autopopulate_mac.to_s.empty? || !_autopopulate_oui.to_s.empty?) && + _tags&.any? {|tag| tag[:slug] == @device_tag_manufacturer_unknown[:slug]}) + # if MAC is set but OUI is not, do a quick lookup + if (!_autopopulate_mac.nil? && !_autopopulate_mac.empty?) && + (_autopopulate_oui.nil? || _autopopulate_oui.empty?) + then + _autopopulate_oui = mac_to_oui_lookup(_autopopulate_mac) + end + # a MAC address or OUI field was specified where before we had none, which means we're going to overwrite + # the device manufacturer previously created which was probably something like "Unspecified" + # and also remove the "unknown manufacturer" tag + _autopopulate_dtype, + _autopopulate_manuf = lookup_or_create_manuf_and_dtype(_autopopulate_oui, + _autopopulate_default_manuf, + _autopopulate_default_dtype, + _nb) + if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? + _patched_device_data[:device_type] = _autopopulate_dtype[:id] + end + _tags = _tags.filter{|tag| tag[:slug] != @device_tag_manufacturer_unknown[:slug]} + end + + # We could have created a device (without mac/OUI) based on hostname, and now only realize that + # it's actually a VM. However, a device can't have been autopopulated as a VM and then later + # "become" a device, since the only reason we'd have created it as a VM would be because + # we saw the OUI (from real traffic) in @vm_namesarray in the first place. + _is_vm = _was_vm || (_autopopulate_manuf.is_a?(Hash) && (_autopopulate_manuf.fetch(:vm, false) == true)) + _device_to_vm = ((_was_vm == false) && (_is_vm == true)) + + if !_patched_device_data.empty? || _device_to_vm + # we've got changes to make, so do it + _device_written = false + + puts('netbox_lookup patching %{name} @ %{site} (%{id}, VM: %{wasvm}->%{isvm}) ("%{host}", "%{mac}", "%{oui}"): %{changes}' % { + name: ip_key, + site: _previous_device_site, + id: _previous_device_id, + wasvm: _was_vm, + isvm: _is_vm, + host: _autopopulate_hostname.to_s, + mac: _autopopulate_mac.to_s, + oui: _autopopulate_oui.to_s, + changes: JSON.generate(_patched_device_data) }) if @debug + + if _device_to_vm + # you can't "convert" a device to a VM, so we have to create a new VM then delete the old device + _vm_data = { :name => _patched_device_data.fetch(:name, [previous_result.fetch(:name, nil)])&.flatten&.uniq.first, + :site => ((_previous_device_site_obj = lookup_or_create_site(_previous_device_site, _nb)) && + _previous_device_site_obj.is_a?(Hash) && + _previous_device_site_obj.has_key?(:id)) ? 
_previous_device_site_obj[:id] : { :slug => _previous_device_site.to_url }, + :tags => _tags, + :status => @default_status } + if (_vm_create_response = _nb.post('virtualization/virtual-machines/', _vm_data.to_json, @nb_headers).body) && + _vm_create_response.is_a?(Hash) && + _vm_create_response.has_key?(:id) + then + _device_written = true + _autopopulate_device = _vm_create_response + # we've created the device as a VM, create_device_interface will be called below to create its interface + + # now delete the old device entry + _old_device_delete_response = _nb.delete("dcim/devices/#{_previous_device_id}/") + puts('netbox_lookup (%{name}: dev.%{oldid} -> vm.%{newid}): _old_device_delete_response: %{success}' % { + name: _vm_data[:name], + oldid: _previous_device_id, + newid: _vm_create_response[:id], + success: _old_device_delete_response.success? }) if @debug + elsif @debug + puts('netbox_lookup (%{name}): _vm_create_response: %{result}' % { name: _vm_data[:name], result: JSON.generate(_vm_create_response) }) + end + + elsif (_is_vm == _was_vm) + # the type of object (vm vs. device) is the same as it was before, so we're just doing an update + _patched_device_data[:tags] = _tags + if (_patched_device_response = _nb.patch("#{_was_vm ? 'virtualization/virtual-machines' : 'dcim/devices'}/#{_previous_device_id}/", _patched_device_data.to_json, @nb_headers).body) && + _patched_device_response.is_a?(Hash) && + _patched_device_response.has_key?(:id) + then + _device_written = true + elsif @debug + puts('netbox_lookup (%{name}): _patched_device_response: %{result}' % { name: _previous_device_id, result: JSON.generate(_patched_device_response) }) + end # _nb.patch succeeded + end # _is_vm vs _was_vm check + + # we've made the change to netbox, do a call to lookup_devices to get the formatted/updated data + # (yeah, this is a *little* inefficient, but this should really only happen one extra time per device at most) + _devices = lookup_devices(ip_key, @lookup_site, _lookup_service_port, @netbox_url_base, @netbox_url_suffix, _nb) if _device_written + + end # check _patched_device_data, _device_to_vm + + end # check previous device ID is valid + end # check on previous_result function argument + + if !_devices.nil? + _devices = collect_values(crush(_devices)) + _devices.fetch(:service, [])&.flatten!&.uniq! + _lookup_result = _devices + end + end # @lookup_type == :ip_device + + # this || is because we are going to need to do the prefix lookup if we're autopopulating + # as well as if we're specifically requested to do that enrichment + + if (@lookup_type == :ip_prefix) || !_autopopulate_device.nil? + ################################################################################# + # retrieve the list of IP address prefixes containing the search key + _prefixes = lookup_prefixes(ip_key, @lookup_site, _nb) + + if (_prefixes.nil? || _prefixes.empty?) && @autopopulate_create_prefix + # we didn't find a prefix containing this private-space IPv4 address and auto-create is true + _prefix_info = autopopulate_prefixes(_key_ip, _autopopulate_default_site, @default_status, _nb) + _prefixes = Array.new unless _prefixes.is_a?(Array) + _prefixes << _prefix_info + end # if auto-create prefix + + _prefixes = collect_values(crush(_prefixes)) + _lookup_result = _prefixes unless (@lookup_type != :ip_prefix) + end # @lookup_type == :ip_prefix + + if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? 
+ # device has been created, we need to create an interface for it + _autopopulate_device = create_device_interface(ip_key, + _autopopulate_device, + _autopopulate_manuf, + _autopopulate_mac, + _nb) + end # check if device was created and has ID + end # IP address is private IP + + # yield return value for cache_hash getset + _lookup_result +end + ############################################################################### # tests diff --git a/malcolm-iso/config/package-lists/system.list.chroot b/malcolm-iso/config/package-lists/system.list.chroot index 9525899e4..9521d5917 100644 --- a/malcolm-iso/config/package-lists/system.list.chroot +++ b/malcolm-iso/config/package-lists/system.list.chroot @@ -100,6 +100,7 @@ mcrypt md5deep menu miscfiles +mmv moreutils mtools multitail diff --git a/netbox/preload/tags.yml b/netbox/preload/tags.yml new file mode 100644 index 000000000..00aaea094 --- /dev/null +++ b/netbox/preload/tags.yml @@ -0,0 +1,9 @@ +- name: Autopopulated + slug: malcolm-autopopulated + color: Light Blue +- name: Manufacturer Unknown + slug: manufacturer-unknown + color: Light Grey +- name: Hostname Unknown + slug: hostname-unknown + color: Light Grey diff --git a/nginx/nginx.conf b/nginx/nginx.conf index f05786b5c..97acc2cf6 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -199,6 +199,18 @@ http { proxy_set_header Host file-monitor.malcolm.local; } + # extracted file download hedgehog redirect + location ~* ^/hh-extracted-files/([a-zA-Z0-9-\.]+)\b(.*) { + include /etc/nginx/nginx_auth_rt.conf; + include /etc/nginx/nginx_system_resolver.conf; + set $upstream $1:8006; + rewrite ^/hh-extracted-files/([a-zA-Z0-9-\.]+)(.*)$ $2 break; + proxy_pass https://$upstream; + proxy_ssl_verify off; + proxy_set_header Host $1; + proxy_set_header X-Malcolm-Forward "/hh-extracted-files/$1"; + } + # netbox location /netbox { include /etc/nginx/nginx_auth_rt.conf; diff --git a/nginx/nginx_readonly.conf b/nginx/nginx_readonly.conf index 1251cf46f..94fd75e16 100644 --- a/nginx/nginx_readonly.conf +++ b/nginx/nginx_readonly.conf @@ -129,6 +129,17 @@ http { proxy_set_header Host file-monitor.malcolm.local; } + # extracted file download hedgehog redirect + location ~* ^/hh-extracted-files/([a-zA-Z0-9-\.]+)\b(.*) { + include /etc/nginx/nginx_system_resolver.conf; + set $upstream $1:8006; + rewrite ^/hh-extracted-files/([a-zA-Z0-9-\.]+)(.*)$ $2 break; + proxy_pass https://$upstream; + proxy_ssl_verify off; + proxy_set_header Host $1; + proxy_set_header X-Malcolm-Forward "/hh-extracted-files/$1"; + } + # netbox location /netbox { limit_except GET { deny all; } diff --git a/nginx/scripts/docker_entrypoint.sh b/nginx/scripts/docker_entrypoint.sh index 3a43d04bf..ee1a92bb6 100755 --- a/nginx/scripts/docker_entrypoint.sh +++ b/nginx/scripts/docker_entrypoint.sh @@ -18,6 +18,9 @@ NGINX_SSL_CONF=/etc/nginx/nginx_ssl_config.conf # a blank file just to use as an "include" placeholder for the nginx's LDAP config when LDAP is not used NGINX_BLANK_CONF=/etc/nginx/nginx_blank.conf +# "include" file for resolver directive +NGINX_RESOLVER_CONF=/etc/nginx/nginx_system_resolver.conf + # "include" file for auth_basic, prompt, and htpasswd location NGINX_BASIC_AUTH_CONF=/etc/nginx/nginx_auth_basic.conf @@ -287,6 +290,12 @@ for TEMPLATE in "$NGINX_TEMPLATES_DIR"/*.conf.template; do DOLLAR=$ envsubst < "$TEMPLATE" > "$NGINX_CONFD_DIR/$(basename "$TEMPLATE"| sed 's/\.template$//')" done +# put the DNS resolver (nameserver from /etc/resolv.conf) into NGINX_RESOLVER_CONF +DNS_SERVER="$(grep -i '^nameserver' 
/etc/resolv.conf | head -n1 | cut -d ' ' -f2)" +[[ -z "${DNS_SERVER:-}" ]] && DNS_SERVER="127.0.0.11" +export DNS_SERVER +echo "resolver ${DNS_SERVER};" > "${NGINX_RESOLVER_CONF}" + set -e # insert some build and runtime information into the landing page diff --git a/scripts/install.py b/scripts/install.py index 969367cd6..a1ecff792 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -78,6 +78,7 @@ eprint, flatten, LoadFileIfJson, + remove_suffix, run_process, same_file_or_dir, str2bool, @@ -113,6 +114,8 @@ TrueOrFalseNoQuote = lambda x: 'true' if x else 'false' MaxAskForValueCount = 100 +str2percent = lambda val: max(min(100, int(remove_suffix(val, '%'))), 0) if val else 0 + ################################################################################################### # get interactive user response to Y/N question @@ -1086,11 +1089,13 @@ def tweak_malcolm_runtime(self, malcolm_install_path): indexPruneNameSort = False arkimeManagePCAP = False arkimeFreeSpaceG = '10%' + extractedFileMaxSizeThreshold = '1TB' + extractedFileMaxPercentThreshold = 0 indexManagementPolicy = False indexManagementHotWarm = False indexManagementOptimizationTimePeriod = '30d' indexManagementSpiDataRetention = '90d' - indexManagementReplicas = 1 + indexManagementReplicas = 0 indexManagementHistoryInWeeks = 13 indexManagementOptimizeSessionSegments = 1 @@ -1150,14 +1155,19 @@ def tweak_malcolm_runtime(self, malcolm_install_path): ): break - if InstallerYesOrNo( + diskUsageManagementPrompt = InstallerYesOrNo( ( - 'Should Malcolm delete the oldest database indices and/or PCAP files based on available storage?' + 'Should Malcolm delete the oldest database indices and capture artifacts based on available storage?' if ((opensearchPrimaryMode == DatabaseMode.OpenSearchLocal) and (malcolmProfile == PROFILE_MALCOLM)) - else 'Should Arkime delete PCAP files based on available storage (see https://arkime.com/faq#pcap-deletion)?' + else 'Should Malcolm delete the oldest capture artifacts based on available storage?' 
), - default=args.arkimeManagePCAP or bool(args.indexPruneSizeLimit), - ): + default=args.arkimeManagePCAP + or bool(args.indexPruneSizeLimit) + or bool(args.extractedFileMaxSizeThreshold) + or (args.extractedFileMaxPercentThreshold > 0), + ) + if diskUsageManagementPrompt: + # delete oldest indexes based on index pattern size if ( (malcolmProfile == PROFILE_MALCOLM) @@ -1362,6 +1372,7 @@ def tweak_malcolm_runtime(self, malcolm_install_path): ], ) if fileCarveMode and (fileCarveMode != 'none'): + loopBreaker = CountUntilException(MaxAskForValueCount, 'Invalid file preservation behavior') while filePreserveMode not in allowedFilePreserveModes and loopBreaker.increment(): filePreserveMode = InstallerChooseOne( @@ -1379,6 +1390,33 @@ def tweak_malcolm_runtime(self, malcolm_install_path): for x in allowedFilePreserveModes ], ) + + if diskUsageManagementPrompt: + loopBreaker = CountUntilException( + MaxAskForValueCount, 'Invalid Zeek extracted file prune threshold' + ) + extractedFilePruneThresholdTemp = '' + while ( + not re.match( + r'^\d+(\.\d+)?\s*[kmgtp%]?b?$', extractedFilePruneThresholdTemp, flags=re.IGNORECASE + ) + ) and loopBreaker.increment(): + extractedFilePruneThresholdTemp = InstallerAskForString( + 'Enter maximum allowed space for Zeek-extracted files (e.g., 250GB) or file system fill threshold (e.g., 90%)', + default=( + args.extractedFileMaxPercentThreshold + if args.extractedFileMaxPercentThreshold + else args.extractedFileMaxSizeThreshold + ), + ) + if extractedFilePruneThresholdTemp: + if '%' in extractedFilePruneThresholdTemp: + extractedFileMaxPercentThreshold = str2percent(extractedFilePruneThresholdTemp) + extractedFileMaxSizeThreshold = '0' + else: + extractedFileMaxPercentThreshold = 0 + extractedFileMaxSizeThreshold = extractedFilePruneThresholdTemp + fileCarveHttpServer = (malcolmProfile == PROFILE_MALCOLM) and InstallerYesOrNo( 'Expose web interface for downloading preserved files?', default=args.fileCarveHttpServer ) @@ -1951,6 +1989,18 @@ def tweak_malcolm_runtime(self, malcolm_install_path): 'EXTRACTED_FILE_PRESERVATION', filePreserveMode, ), + # total disk fill threshold for pruning zeek extracted files + EnvValue( + os.path.join(args.configDir, 'zeek.env'), + 'EXTRACTED_FILE_PRUNE_THRESHOLD_TOTAL_DISK_USAGE_PERCENT', + extractedFileMaxPercentThreshold, + ), + # zeek extracted files maximum consumption threshold + EnvValue( + os.path.join(args.configDir, 'zeek.env'), + 'EXTRACTED_FILE_PRUNE_THRESHOLD_MAX_SIZE', + extractedFileMaxSizeThreshold, + ), # HTTP server for extracted files EnvValue( os.path.join(args.configDir, 'zeek.env'), @@ -3767,6 +3817,24 @@ def main(): default='', help=f'Threshold for Arkime PCAP deletion (see https://arkime.com/faq#pcap-deletion)', ) + storageArgGroup.add_argument( + '--extracted-file-max-size-threshold', + dest='extractedFileMaxSizeThreshold', + required=False, + metavar='', + type=str, + default='', + help=f'Delete zeek-extracted files when they consume this much disk space (e.g., 250GB, 1TB, etc.)', + ) + storageArgGroup.add_argument( + '--extracted-file-total-disk-usage-percent-threshold', + dest='extractedFileMaxPercentThreshold', + required=False, + metavar='', + type=str2percent, + default=0, + help=f'Delete zeek-extracted files when the file system exceeds this percentage full (e.g., 90%, etc.)', + ) storageArgGroup.add_argument( '--delete-index-threshold', dest='indexPruneSizeLimit', diff --git a/scripts/malcolm_common.py b/scripts/malcolm_common.py index 326842b93..b0a45bee0 100644 --- a/scripts/malcolm_common.py +++ 
b/scripts/malcolm_common.py @@ -739,7 +739,7 @@ def DownloadToFile(url, local_filename, debug=False): | failed\s+to\s+get\s+tcp6?\s+stats\s+from\s+/proc | GET\s+/(_cat/health|api/status|sessions2-|arkime_\w+).+HTTP/[\d\.].+\b200\b | GET\s+/\s+.+\b200\b.+ELB-HealthChecker - | (GET|POST|PATCH)\s+/netbox/.+HTTP/[\d\.].+\b20[01]\b + | (GET|POST|PATCH|DELETE)\s+/netbox/.+HTTP/[\d\.]+.+\b20[\d]\b | (GET|POST)\s+/(fields|get|valueActions|views|fieldActions)\b.+bytes\s+[\d\.]+\s+ms | loaded\s+config\s+'/etc/netbox/config/ | LOG:\s+checkpoint\s+(complete|starting) diff --git a/scripts/third-party-environments/aws/ami/packer_vars.json.example b/scripts/third-party-environments/aws/ami/packer_vars.json.example index a8313c604..671761482 100644 --- a/scripts/third-party-environments/aws/ami/packer_vars.json.example +++ b/scripts/third-party-environments/aws/ami/packer_vars.json.example @@ -2,7 +2,7 @@ "aws_access_key": "XXXXXXXXXXXXXXXXXXXX", "aws_secret_key": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "instance_type": "t2.micro", - "malcolm_tag": "v24.03.1", + "malcolm_tag": "v24.04.0", "malcolm_repo": "cisagov/Malcolm", "malcolm_uid": "1000", "ssh_username": "ec2-user", diff --git a/scripts/third-party-logs/fluent-bit-setup.ps1 b/scripts/third-party-logs/fluent-bit-setup.ps1 index 0acbd9611..7d02a6536 100644 --- a/scripts/third-party-logs/fluent-bit-setup.ps1 +++ b/scripts/third-party-logs/fluent-bit-setup.ps1 @@ -9,7 +9,7 @@ ############################################################################### $fluent_bit_version = '3.0' -$fluent_bit_full_version = '3.0.0' +$fluent_bit_full_version = '3.0.3' ############################################################################### # select an item from a menu provided in an array diff --git a/shared/bin/agg-init.sh b/shared/bin/agg-init.sh index b17910c91..cce07105d 100755 --- a/shared/bin/agg-init.sh +++ b/shared/bin/agg-init.sh @@ -47,7 +47,7 @@ if [[ -r "$SCRIPT_PATH"/common-init.sh ]]; then BadTelemetry # if we need to import prebuilt Malcolm docker images, do so now (but not if we're in a live-usb boot) - DOCKER_DRIVER="$(docker info 2>/dev/null | grep 'Storage Driver' | cut -d' ' -f3)" + DOCKER_DRIVER="$(docker info -f json 2>/dev/null | jq -r '.Driver')" if [[ -n $DOCKER_DRIVER ]] && [[ "$DOCKER_DRIVER" != "vfs" ]] && ! 
grep -q boot=live /proc/cmdline; then docker load -q -i /malcolm_images.tar.xz && rm -f /malcolm_images.tar.xz fi diff --git a/shared/bin/configure-capture.py b/shared/bin/configure-capture.py index 44cc1b01c..725ec2ae4 100755 --- a/shared/bin/configure-capture.py +++ b/shared/bin/configure-capture.py @@ -62,6 +62,7 @@ class Constants: MISCBEAT = 'miscbeat' ARKIMECAP = 'arkime-capture' TX_RX_SECURE = 'ssl-client-receive' + ACL_CONFIGURE = 'acl-configure' BEAT_DIR = { FILEBEAT: f'/opt/sensor/sensor_ctl/{FILEBEAT}', @@ -77,9 +78,10 @@ class Constants: BEAT_LS_HOST = 'BEAT_LS_HOST' BEAT_LS_PORT = 'BEAT_LS_PORT' BEAT_LS_SSL = 'BEAT_LS_SSL' - BEAT_LS_SSL_CA_CRT = 'BEAT_LS_SSL_CA_CRT' - BEAT_LS_SSL_CLIENT_CRT = 'BEAT_LS_SSL_CLIENT_CRT' - BEAT_LS_SSL_CLIENT_KEY = 'BEAT_LS_SSL_CLIENT_KEY' + BEAT_LS_SSL_PREFIX = 'BEAT_LS_SSL_' + BEAT_LS_SSL_CA_CRT = f'{BEAT_LS_SSL_PREFIX}CA_CRT' + BEAT_LS_SSL_CLIENT_CRT = f'{BEAT_LS_SSL_PREFIX}CLIENT_CRT' + BEAT_LS_SSL_CLIENT_KEY = f'{BEAT_LS_SSL_PREFIX}CLIENT_KEY' BEAT_LS_SSL_VERIFY = 'BEAT_LS_SSL_VERIFY' BEAT_LS_CERT_DIR_DEFAULT = "/opt/sensor/sensor_ctl/logstash-client-certificates" @@ -105,7 +107,6 @@ class Constants: # specific to arkime ARKIME_PASSWORD_SECRET = "ARKIME_PASSWORD_SECRET" - ARKIME_PACKET_ACL = "ARKIME_PACKET_ACL" ARKIME_COMPRESSION_TYPE = "ARKIME_COMPRESSION_TYPE" ARKIME_COMPRESSION_LEVEL = "ARKIME_COMPRESSION_LEVEL" ARKIME_COMPRESSION_TYPES = ( @@ -118,6 +119,9 @@ class Constants: 'zstd': (-5, 19, 3), } + # ACL for Arkime PCAP reachback and extracted files server + MALCOLM_REQUEST_ACL = "MALCOLM_REQUEST_ACL" + MSG_CONFIG_MODE = 'Configuration Mode' MSG_CONFIG_MODE_CAPTURE = 'Configure Capture' MSG_CONFIG_MODE_FORWARD = 'Configure Forwarding' @@ -129,6 +133,8 @@ class Constants: MSG_CONFIG_FILEBEAT = (f'{FILEBEAT}', f'Configure Zeek log forwarding via {FILEBEAT}') MSG_CONFIG_MISCBEAT = (f'{MISCBEAT}', f"Configure miscellaneous sensor metrics forwarding via {FILEBEAT}") MSG_CONFIG_TXRX = (f'{TX_RX_SECURE}', f'Receive client SSL files for {FILEBEAT} from Malcolm') + MSG_CONFIG_ACL = (f'{ACL_CONFIGURE}', f'Configure ACL for artifact reachback from Malcolm') + MSG_OVERWRITE_CONFIG = '{} is already configured, overwrite current settings?' MSG_IDENTIFY_NICS = 'Do you need help identifying network interfaces?' MSG_BACKGROUND_TITLE = 'Sensor Configuration' @@ -138,11 +144,18 @@ class Constants: ) MSG_CONFIG_ICS_BEST_GUESS = 'Should the sensor use "best guess" to identify potential OT/ICS traffic with Zeek?' MSG_CONFIG_ZEEK_CARVED_SCANNERS = 'Specify scanners for Zeek-carved files' + MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ZIP = 'ZIP preserved files when downloaded via web interface?' 
+ MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ZIP_KEY = ( + 'Enter ZIP archive password for downloaded preserved files (or leave blank for unprotected)' + ) + MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ENC_KEY = ( + 'Enter AES-256-CBC encryption password for downloaded preserved files (or leave blank for unencrypted)' + ) MSG_CONFIG_ZEEK_CARVING = 'Specify Zeek file carving mode' MSG_CONFIG_ZEEK_CARVING_MIMES = 'Specify file types to carve' MSG_CONFIG_CARVED_FILE_PRESERVATION = 'Specify which carved files to preserve' MSG_CONFIG_CAP_CONFIRM = 'Sensor will capture traffic with the following parameters:\n\n{}' - MSG_CONFIG_AUTOSTART_CONFIRM = 'Sensor autostart the following services:\n\n{}' + MSG_CONFIG_AUTOSTART_CONFIRM = 'Sensor will autostart the following services:\n\n{}' MSG_CONFIG_FORWARDING_CONFIRM = '{} will forward with the following parameters:\n\n{}' MSG_CONFIG_CAP_PATHS = 'Provide paths for captured PCAPs and Zeek logs' MSG_CONFIG_CAPTURE_SUCCESS = 'Capture interface set to {} in {}.\n\nReboot to apply changes.' @@ -151,7 +164,7 @@ class Constants: '{} forwarding configured:\n\n{}\n\nRestart forwarding services or reboot to apply changes.' ) MSG_CONFIG_ARKIME_VIEWER_PASSWORD = 'Specify password hash secret for Arkime viewer cluster' - MSG_CONFIG_ARKIME_PCAP_ACL = 'Specify IP addresses for PCAP retrieval ACL (one per line)' + MSG_CONFIG_REQUEST_ACL = 'Specify IP addresses for ACL for artifact reachback from Malcolm (one per line)' MSG_ERR_PLEBE_REQUIRED = 'this utility should be be run as non-privileged user' MSG_ERROR_DIR_NOT_FOUND = 'One or more of the paths specified does not exist' MSG_ERROR_FILE_NOT_FOUND = 'One or more of the files specified does not exist' @@ -184,6 +197,20 @@ class Constants: d.set_background_title(Constants.MSG_BACKGROUND_TITLE) +################################################################################################### +def rewrite_dict_to_file(vals_dict, config_file_name, inplace=True, backup='.bak'): + if vals_dict and os.path.isfile(config_file_name): + values_re = re.compile(r"\b(" + '|'.join(list(vals_dict.keys())) + r")\s*=\s*.*?$") + with fileinput.FileInput(config_file_name, inplace=inplace, backup=backup) as file: + for line in file: + line = line.rstrip("\n") + key_match = values_re.search(line) + if key_match is not None: + print(values_re.sub(r"\1=%s" % vals_dict[key_match.group(1)], line)) + else: + print(line) + + ################################################################################################### def mime_to_extension_mappings(mapfile): # get all mime-to-extension mappings from our mapping zeek file into a dictionary @@ -364,28 +391,19 @@ def main(): if len(line.strip()) > 0: name, var = remove_prefix(line, "export").partition("=")[::2] capture_config_dict[name.strip()] = var.strip().strip("'").strip('"') - if (Constants.BEAT_OS_HOST not in previous_config_values.keys()) and ( - "OS_HOST" in capture_config_dict.keys() - ): - previous_config_values[Constants.BEAT_OS_HOST] = capture_config_dict["OS_HOST"] - if (Constants.BEAT_OS_PORT not in previous_config_values.keys()) and ( - "OS_PORT" in capture_config_dict.keys() - ): - previous_config_values[Constants.BEAT_OS_PORT] = capture_config_dict["OS_PORT"] - if (Constants.BEAT_HTTP_USERNAME not in previous_config_values.keys()) and ( - "OS_USERNAME" in capture_config_dict.keys() - ): - previous_config_values[Constants.BEAT_HTTP_USERNAME] = capture_config_dict["OS_USERNAME"] - if (Constants.ARKIME_PACKET_ACL not in previous_config_values.keys()) and ( - "ARKIME_PACKET_ACL" in 
capture_config_dict.keys() - ): - previous_config_values[Constants.ARKIME_PACKET_ACL] = capture_config_dict[Constants.ARKIME_PACKET_ACL] - if (Constants.ARKIME_PASSWORD_SECRET not in previous_config_values.keys()) and ( - "ARKIME_PASSWORD_SECRET" in capture_config_dict.keys() - ): - previous_config_values[Constants.ARKIME_PASSWORD_SECRET] = capture_config_dict[ - Constants.ARKIME_PASSWORD_SECRET - ] + + for key, value in { + Constants.BEAT_OS_HOST: "OS_HOST", + Constants.BEAT_OS_PORT: "OS_PORT", + Constants.BEAT_HTTP_USERNAME: "OS_USERNAME", + Constants.MALCOLM_REQUEST_ACL: Constants.MALCOLM_REQUEST_ACL, + Constants.ARKIME_PASSWORD_SECRET: Constants.ARKIME_PASSWORD_SECRET, + Constants.BEAT_LS_SSL_CA_CRT: Constants.BEAT_LS_SSL_CA_CRT, + Constants.BEAT_LS_SSL_CLIENT_CRT: Constants.BEAT_LS_SSL_CLIENT_CRT, + Constants.BEAT_LS_SSL_CLIENT_KEY: Constants.BEAT_LS_SSL_CLIENT_KEY, + }.items(): + if (key not in previous_config_values.keys()) and (value in capture_config_dict.keys()): + previous_config_values[key] = capture_config_dict[value] code = d.yesno(Constants.MSG_WELCOME_TITLE, yes_label="Continue", no_label="Quit") if code == Dialog.CANCEL or code == Dialog.ESC: @@ -430,22 +448,25 @@ def main(): # get confirmation from user that we really want to do this code = d.yesno( Constants.MSG_CONFIG_AUTOSTART_CONFIRM.format( - "\n".join(sorted([f"{k}={v}" for k, v in capture_config_dict.items() if "AUTOSTART" in k])) + "\n".join( + sorted( + [ + f"{k}={v}" + for k, v in capture_config_dict.items() + if (("AUTOSTART" in k) and (not k.startswith("#"))) + ] + ) + ) ), yes_label="OK", no_label="Cancel", ) if code == Dialog.OK: # modify specified values in-place in SENSOR_CAPTURE_CONFIG file - autostart_re = re.compile(r"(\bAUTOSTART_\w+)\s*=\s*.+?$") - with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: - for line in file: - line = line.rstrip("\n") - autostart_match = autostart_re.search(line) - if autostart_match is not None: - print(autostart_re.sub(r"\1=%s" % capture_config_dict[autostart_match.group(1)], line)) - else: - print(line) + rewrite_dict_to_file( + {k: v for (k, v) in capture_config_dict.items() if k.startswith('AUTOSTART_')}, + Constants.SENSOR_CAPTURE_CONFIG, + ) # hooray code = d.msgbox(text=Constants.MSG_CONFIG_AUTOSTART_SUCCESS) @@ -458,7 +479,9 @@ def main(): # previously used capture interfaces preselected_ifaces = set([x.strip() for x in capture_config_dict["CAPTURE_INTERFACE"].split(',')]) - while (len(available_adapters) > 0) and (d.yesno(Constants.MSG_IDENTIFY_NICS) == Dialog.OK): + while (len(available_adapters) > 0) and ( + d.yesno(Constants.MSG_IDENTIFY_NICS, yes_label="No", no_label="Yes") != Dialog.OK + ): code, blinky_iface = d.radiolist( Constants.MSG_SELECT_BLINK_INTERFACE, choices=[(adapter.name, adapter.description, False) for adapter in available_adapters], @@ -526,19 +549,6 @@ def main(): ) prev_capture_filter = capture_filter - # regular expressions for selected name=value pairs to update in configuration file - capture_interface_re = re.compile(r"(\bCAPTURE_INTERFACE)\s*=\s*.+?$") - capture_filter_re = re.compile(r"(\bCAPTURE_FILTER)\s*=\s*.*?$") - pcap_path_re = re.compile(r"(\bPCAP_PATH)\s*=\s*.+?$") - zeek_path_re = re.compile(r"(\bZEEK_LOG_PATH)\s*=\s*.+?$") - zeek_carve_re = re.compile(r"(\bZEEK_EXTRACTOR_MODE)\s*=\s*.+?$") - zeek_file_preservation_re = re.compile(r"(\bEXTRACTED_FILE_PRESERVATION)\s*=\s*.+?$") - zeek_carve_override_re = re.compile(r"(\bZEEK_EXTRACTOR_OVERRIDE_FILE)\s*=\s*.*?$") - zeek_file_watch_re = 
re.compile(r"(\bZEEK_FILE_WATCH)\s*=\s*.+?$") - zeek_file_scanner_re = re.compile(r"(\bZEEK_FILE_SCAN_\w+)\s*=\s*.+?$") - disable_ics_all_re = re.compile(r"(\bZEEK_DISABLE_ICS_ALL)\s*=\s*.+?$") - ics_best_guess_re = re.compile(r"(\bZEEK_DISABLE_BEST_GUESS_ICS)\s*=\s*.+?$") - # get paths for captured PCAP and Zeek files while True: code, path_values = d.form( @@ -565,8 +575,10 @@ def main(): code = d.msgbox(text=Constants.MSG_ERROR_DIR_NOT_FOUND) # enable/disable ICs - ics_network = d.yesno(Constants.MSG_CONFIG_ICS_ANALYZERS) == Dialog.OK - ics_best_guess = ics_network and (d.yesno(Constants.MSG_CONFIG_ICS_BEST_GUESS) == Dialog.OK) + ics_network = d.yesno(Constants.MSG_CONFIG_ICS_ANALYZERS, yes_label="No", no_label="Yes") != Dialog.OK + ics_best_guess = ics_network and ( + d.yesno(Constants.MSG_CONFIG_ICS_BEST_GUESS, yes_label="No", no_label="Yes") != Dialog.OK + ) # configure file carving code, zeek_carve_mode = d.radiolist( @@ -626,6 +638,8 @@ def main(): mime_tags = [] capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"] = "" zeek_carved_file_preservation = PRESERVE_NONE + zeek_carved_file_http_server_zip = False + zeek_carved_file_http_serve_encrypt_key = '' if zeek_carve_mode.startswith(Constants.ZEEK_FILE_CARVING_CUSTOM): # get all known mime-to-extension mappings into a dictionary @@ -726,6 +740,19 @@ def main(): ]: capture_config_dict[key] = "false" + if zeek_carved_file_preservation != PRESERVE_NONE: + zeek_carved_file_http_server_zip = ( + d.yesno(Constants.MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ZIP) == Dialog.OK + ) + code, zeek_carved_file_http_serve_encrypt_key = d.inputbox( + ( + Constants.MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ZIP_KEY + if zeek_carved_file_http_server_zip + else Constants.MSG_CONFIG_ZEEK_CARVED_HTTP_SERVER_ENC_KEY + ), + init=capture_config_dict.get("EXTRACTED_FILE_HTTP_SERVER_KEY", 'infected'), + ) + # reconstitute dictionary with user-specified values capture_config_dict["CAPTURE_INTERFACE"] = ",".join(selected_ifaces) capture_config_dict["CAPTURE_FILTER"] = capture_filter @@ -733,6 +760,10 @@ def main(): capture_config_dict["ZEEK_LOG_PATH"] = path_values[1] capture_config_dict["ZEEK_EXTRACTOR_MODE"] = zeek_carve_mode capture_config_dict["EXTRACTED_FILE_PRESERVATION"] = zeek_carved_file_preservation + capture_config_dict["EXTRACTED_FILE_HTTP_SERVER_ZIP"] = ( + 'true' if zeek_carved_file_http_server_zip else 'false' + ) + capture_config_dict["EXTRACTED_FILE_HTTP_SERVER_KEY"] = zeek_carved_file_http_serve_encrypt_key capture_config_dict["ZEEK_DISABLE_ICS_ALL"] = '' if ics_network else 'true' capture_config_dict["ZEEK_DISABLE_BEST_GUESS_ICS"] = '' if ics_best_guess else 'true' @@ -754,49 +785,31 @@ def main(): ) if code == Dialog.OK: # modify specified values in-place in SENSOR_CAPTURE_CONFIG file - with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: - for line in file: - line = line.rstrip("\n") - if capture_interface_re.search(line) is not None: - print(capture_interface_re.sub(r"\1=%s" % ",".join(selected_ifaces), line)) - elif zeek_carve_override_re.search(line) is not None: - print( - zeek_carve_override_re.sub( - r'\1="%s"' % capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"], line - ) - ) - elif zeek_carve_re.search(line) is not None: - print(zeek_carve_re.sub(r"\1=%s" % zeek_carve_mode, line)) - elif zeek_file_preservation_re.search(line) is not None: - print(zeek_file_preservation_re.sub(r"\1=%s" % zeek_carved_file_preservation, line)) - elif capture_filter_re.search(line) is not None: - 
print(capture_filter_re.sub(r'\1="%s"' % capture_filter, line)) - elif pcap_path_re.search(line) is not None: - print(pcap_path_re.sub(r'\1="%s"' % capture_config_dict["PCAP_PATH"], line)) - elif zeek_path_re.search(line) is not None: - print(zeek_path_re.sub(r'\1="%s"' % capture_config_dict["ZEEK_LOG_PATH"], line)) - elif zeek_file_watch_re.search(line) is not None: - print(zeek_file_watch_re.sub(r"\1=%s" % capture_config_dict["ZEEK_FILE_WATCH"], line)) - elif disable_ics_all_re.search(line) is not None: - print( - disable_ics_all_re.sub(r'\1=%s' % capture_config_dict["ZEEK_DISABLE_ICS_ALL"], line) - ) - elif ics_best_guess_re.search(line) is not None: - print( - ics_best_guess_re.sub( - r'\1=%s' % capture_config_dict["ZEEK_DISABLE_BEST_GUESS_ICS"], line - ) - ) - else: - zeek_file_scanner_match = zeek_file_scanner_re.search(line) - if zeek_file_scanner_match is not None: - print( - zeek_file_scanner_re.sub( - r"\1=%s" % capture_config_dict[zeek_file_scanner_match.group(1)], line - ) - ) - else: - print(line) + rewrite_dict_to_file( + { + "CAPTURE_FILTER": '"' + capture_config_dict["CAPTURE_FILTER"] + '"', + "CAPTURE_INTERFACE": capture_config_dict["CAPTURE_INTERFACE"], + "EXTRACTED_FILE_HTTP_SERVER_KEY": '"' + + capture_config_dict["EXTRACTED_FILE_HTTP_SERVER_KEY"] + + '"', + "EXTRACTED_FILE_HTTP_SERVER_ZIP": capture_config_dict["EXTRACTED_FILE_HTTP_SERVER_ZIP"], + "EXTRACTED_FILE_PRESERVATION": capture_config_dict["EXTRACTED_FILE_PRESERVATION"], + "PCAP_PATH": '"' + capture_config_dict["PCAP_PATH"] + '"', + "ZEEK_DISABLE_BEST_GUESS_ICS": capture_config_dict["ZEEK_DISABLE_BEST_GUESS_ICS"], + "ZEEK_DISABLE_ICS_ALL": capture_config_dict["ZEEK_DISABLE_ICS_ALL"], + "ZEEK_EXTRACTOR_MODE": capture_config_dict["ZEEK_EXTRACTOR_MODE"], + "ZEEK_LOG_PATH": '"' + capture_config_dict["ZEEK_LOG_PATH"] + '"', + "ZEEK_EXTRACTOR_OVERRIDE_FILE": '"' + + capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"] + + '"', + "ZEEK_FILE_SCAN_CAPA": capture_config_dict["ZEEK_FILE_SCAN_CAPA"], + "ZEEK_FILE_SCAN_CLAMAV": capture_config_dict["ZEEK_FILE_SCAN_CLAMAV"], + "ZEEK_FILE_SCAN_VTOT": capture_config_dict["ZEEK_FILE_SCAN_VTOT"], + "ZEEK_FILE_SCAN_YARA": capture_config_dict["ZEEK_FILE_SCAN_YARA"], + "ZEEK_FILE_WATCH": capture_config_dict["ZEEK_FILE_WATCH"], + }, + Constants.SENSOR_CAPTURE_CONFIG, + ) # write out file carving overrides if specified if (len(mime_tags) > 0) and (len(capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"]) > 0): @@ -835,8 +848,9 @@ def main(): Constants.MSG_CONFIG_ARKIME, Constants.MSG_CONFIG_FILEBEAT, Constants.MSG_CONFIG_MISCBEAT, + Constants.MSG_CONFIG_ACL, Constants.MSG_CONFIG_TXRX, - ][: 4 if txRxScript else -1], + ][: 5 if txRxScript else -1], ) if code != Dialog.OK: raise CancelledError @@ -893,22 +907,6 @@ def main(): if arkime_password: arkime_config_dict[Constants.ARKIME_PASSWORD_SECRET] = arkime_password - # get list of IP addresses allowed for packet payload retrieval - lines = previous_config_values[Constants.ARKIME_PACKET_ACL].split(",") - lines.append(opensearch_config_dict[Constants.BEAT_OS_HOST]) - code, lines = d.editbox_str( - "\n".join(list(filter(None, list(set(lines))))), title=Constants.MSG_CONFIG_ARKIME_PCAP_ACL - ) - if code != Dialog.OK: - raise CancelledError - arkime_config_dict[Constants.ARKIME_PACKET_ACL] = ','.join( - [ - ip - for ip in list(set(filter(None, [x.strip() for x in lines.split('\n')]))) - if isipaddress(ip) - ] - ) - # arkime PCAP compression settings code, compression_type = d.radiolist( Constants.MSG_CONFIG_ARKIME_COMPRESSION, @@ -971,21 +969,7 
@@ def main(): previous_config_values = opensearch_config_dict.copy() # modify specified values in-place in SENSOR_CAPTURE_CONFIG file - opensearch_values_re = re.compile( - r"\b(" + '|'.join(list(arkime_config_dict.keys())) + r")\s*=\s*.*?$" - ) - with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: - for line in file: - line = line.rstrip("\n") - opensearch_key_match = opensearch_values_re.search(line) - if opensearch_key_match is not None: - print( - opensearch_values_re.sub( - r"\1=%s" % arkime_config_dict[opensearch_key_match.group(1)], line - ) - ) - else: - print(line) + rewrite_dict_to_file(arkime_config_dict, Constants.SENSOR_CAPTURE_CONFIG) # hooray code = d.msgbox( @@ -1137,10 +1121,8 @@ def main(): 'SSL Certificate Authorities File', 1, 1, - ( - previous_config_values[Constants.BEAT_LS_SSL_CA_CRT] - if Constants.BEAT_LS_SSL_CA_CRT in previous_config_values - else f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/ca.crt" + previous_config_values.get( + Constants.BEAT_LS_SSL_CA_CRT, f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/ca.crt" ), 1, 35, @@ -1151,10 +1133,9 @@ def main(): 'SSL Certificate File', 2, 1, - ( - previous_config_values[Constants.BEAT_LS_SSL_CLIENT_CRT] - if Constants.BEAT_LS_SSL_CLIENT_CRT in previous_config_values - else f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/client.crt" + previous_config_values.get( + Constants.BEAT_LS_SSL_CLIENT_CRT, + f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/client.crt", ), 2, 35, @@ -1165,10 +1146,9 @@ def main(): 'SSL Key File', 3, 1, - ( - previous_config_values[Constants.BEAT_LS_SSL_CLIENT_KEY] - if Constants.BEAT_LS_SSL_CLIENT_KEY in previous_config_values - else f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/client.key" + previous_config_values.get( + Constants.BEAT_LS_SSL_CLIENT_KEY, + f"{Constants.BEAT_LS_CERT_DIR_DEFAULT}/client.key", ), 3, 35, @@ -1254,6 +1234,51 @@ def main(): # keystore list failed raise Exception(Constants.MSG_ERROR_KEYSTORE.format(fwd_mode, "\n".join(add_results))) + # also write the TLS files back out to the config file + rewrite_dict_to_file( + { + k: v + for (k, v) in forwarder_dict.items() + if k.startswith(Constants.BEAT_LS_SSL_PREFIX) and os.path.isfile(str(v)) + }, + Constants.SENSOR_CAPTURE_CONFIG, + ) + + elif fwd_mode == Constants.ACL_CONFIGURE: + + # get list of IP addresses allowed for packet payload retrieval + acl_config_dict = defaultdict(str) + lines = previous_config_values[Constants.MALCOLM_REQUEST_ACL].split(",") + if Constants.BEAT_OS_HOST in previous_config_values and ( + previous_config_values[Constants.BEAT_OS_HOST] + not in ('', '127.0.0.1', '::1', '0.0.0.0', '::', 'localhost') + ): + lines.append(previous_config_values[Constants.BEAT_OS_HOST]) + code, lines = d.editbox_str( + "\n".join(list(filter(None, list(set(lines))))), title=Constants.MSG_CONFIG_REQUEST_ACL + ) + if code != Dialog.OK: + raise CancelledError + + # modify specified ACL value in-place in SENSOR_CAPTURE_CONFIG file + newAclValsDict = { + Constants.MALCOLM_REQUEST_ACL: ','.join( + [ + ip + for ip in list(set(filter(None, [x.strip() for x in lines.split('\n')]))) + if isipaddress(ip) + ] + ) + } + rewrite_dict_to_file(newAclValsDict, Constants.SENSOR_CAPTURE_CONFIG) + + # hooray + code = d.msgbox( + text=Constants.MSG_CONFIG_FORWARDING_SUCCESS.format( + fwd_mode, "\n".join(newAclValsDict[Constants.MALCOLM_REQUEST_ACL].split(',')) + ) + ) + elif (fwd_mode == Constants.TX_RX_SECURE) and txRxScript: # use tx-rx-secure.sh (via croc) to get certs from Malcolm code = d.msgbox(text='Run auth_setup on Malcolm 
"Transfer self-signed client certificates..."') @@ -1310,6 +1335,19 @@ def main(): p.poll() + keyFiles = { + Constants.BEAT_LS_SSL_CA_CRT: os.path.join(Constants.BEAT_LS_CERT_DIR_DEFAULT, 'ca.crt'), + Constants.BEAT_LS_SSL_CLIENT_CRT: os.path.join( + Constants.BEAT_LS_CERT_DIR_DEFAULT, 'client.crt' + ), + Constants.BEAT_LS_SSL_CLIENT_KEY: os.path.join( + Constants.BEAT_LS_CERT_DIR_DEFAULT, 'client.key' + ), + } + if all([os.path.isfile(v) for (k, v) in keyFiles.items()]): + # also write the TLS files back out to the config file + rewrite_dict_to_file(keyFiles, Constants.SENSOR_CAPTURE_CONFIG) + else: # we're here without a valid forwarding type selection?!? raise Exception(Constants.MSG_MESSAGE_ERROR.format(Constants.MSG_INVALID_FORWARDING_TYPE)) diff --git a/file-monitor/scripts/extracted_files_http_server.py b/shared/bin/extracted_files_http_server.py similarity index 80% rename from file-monitor/scripts/extracted_files_http_server.py rename to shared/bin/extracted_files_http_server.py index 18a7bdc40..7f07730ad 100755 --- a/file-monitor/scripts/extracted_files_http_server.py +++ b/shared/bin/extracted_files_http_server.py @@ -7,21 +7,23 @@ # be aes-256-cbc encrypted in a way that's compatible with: # openssl enc -aes-256-cbc -d -in encrypted.data -out decrypted.data +import atexit import argparse import dominate +import functools import hashlib import magic import os import re +import ssl import sys +import time from Crypto.Cipher import AES from datetime import datetime, timedelta, UTC from dominate.tags import * -from http.server import HTTPServer, SimpleHTTPRequestHandler -from socketserver import ThreadingMixIn +from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler from stat import S_IFREG from stream_zip import ZIP_32, stream_zip -from threading import Thread from malcolm_utils import ( eprint, @@ -29,6 +31,7 @@ EVP_KEY_SIZE, OPENSSL_ENC_MAGIC, PKCS5_SALT_LEN, + pushd, remove_prefix, sizeof_fmt, str2bool, @@ -41,7 +44,9 @@ script_name = os.path.basename(__file__) script_path = os.path.dirname(os.path.realpath(__file__)) orig_path = os.getcwd() -filename_truncate_len = 20 +filename_truncate_len_malcolm = 20 +filename_truncate_len = 32 +malcolm_forward_header = 'X-Malcolm-Forward' ################################################################################################### @@ -70,7 +75,7 @@ class HTTPHandler(SimpleHTTPRequestHandler): def translate_path(self, path): path = SimpleHTTPRequestHandler.translate_path(self, path) relpath = os.path.relpath(path, os.getcwd()) - fullpath = os.path.join(self.server.base_path, relpath) + fullpath = os.path.join(self.directory, relpath) return fullpath, relpath # override do_GET for fancy directory listing and so that files are encrypted/zipped, if requested @@ -78,8 +83,12 @@ def do_GET(self): global debug global args + showMalcolmCols = args.malcolm or (malcolm_forward_header in dict(self.headers)) + assetsDirRespReplacer = f"{str(dict(self.headers).get(malcolm_forward_header, ''))}{args.assetsDirRespReplacer}" + fullpath, relpath = self.translate_path(self.path) fileBaseName = os.path.basename(fullpath) + fnameDispLen = filename_truncate_len_malcolm if showMalcolmCols else filename_truncate_len tomorrowStr = (datetime.now(UTC) + timedelta(days=1)).isoformat().split('.')[0] @@ -105,10 +114,10 @@ def do_GET(self): with doc.head: meta(charset="utf-8") meta(name="viewport", content="width=device-width, initial-scale=1, shrink-to-fit=no") - link(rel="icon", href=f"{args.assetsDirRespReplacer}favicon.ico", type="image/x-icon") 
- link(rel="stylesheet", href=f"{args.assetsDirRespReplacer}css/bootstrap-icons.css", type="text/css") - link(rel="stylesheet", href=f"{args.assetsDirRespReplacer}css/google-fonts.css", type="text/css") - link(rel="stylesheet", href=f"{args.assetsDirRespReplacer}css/styles.css", type="text/css") + link(rel="icon", href=f"{assetsDirRespReplacer}favicon.ico", type="image/x-icon") + link(rel="stylesheet", href=f"{assetsDirRespReplacer}css/bootstrap-icons.css", type="text/css") + link(rel="stylesheet", href=f"{assetsDirRespReplacer}css/google-fonts.css", type="text/css") + link(rel="stylesheet", href=f"{assetsDirRespReplacer}css/styles.css", type="text/css") # with doc: @@ -130,7 +139,7 @@ def do_GET(self): th("Type" if args.magic else "Extension"), th("Size"), ) - if args.malcolm: + if showMalcolmCols: t.add( th("Source"), th("IDs"), @@ -143,7 +152,7 @@ def do_GET(self): td("Directory"), td(''), ) - if args.malcolm: + if showMalcolmCols: t.add(th(), th(), th()) # content rows (files and directories) @@ -159,7 +168,7 @@ def do_GET(self): td("Directory"), td(''), ) - if args.malcolm: + if showMalcolmCols: t.add(th(), th(), th()) except Exception as e: eprint(f'Error with directory "{dirname}"": {e}') @@ -178,7 +187,7 @@ def do_GET(self): fmatch = None fsource = '' fids = list() - if args.malcolm: + if showMalcolmCols: # determine if filename is in a pattern we recognize fmatch = carvedFileRegex.search(filename) if fmatch is None: @@ -229,8 +238,8 @@ def do_GET(self): td( a( ( - (filename[:filename_truncate_len] + '...') - if len(filename) > filename_truncate_len + (filename[:fnameDispLen] + '...') + if len(filename) > fnameDispLen else filename ), href=f'{filename}', @@ -252,37 +261,38 @@ def do_GET(self): ) # show special malcolm columns if requested - if args.malcolm and fmatch is not None: - # list carve source, IDs, and timestamp - t.add( - td( - fsource, - style="text-align: center", - ), - td( - [ - a( - fid, - href=f'/arkime/idark2dash/filter?start={timestampStartFilterStr}&stop={tomorrowStr}&field=event.id&value={fid}', - target="_blank", - ) - for fid in fids - ], - style="text-align: center", - ), - td( - ( - timestamp.strftime("%Y-%m-%d %H:%M:%S") - if timestamp - else timestampStr + if showMalcolmCols: + if fmatch is not None: + # list carve source, IDs, and timestamp + t.add( + td( + fsource, + style="text-align: center", ), - title=timestampStr, - style="text-align: center", - ), - ) - else: - # file name format was not recognized, so extra columns are empty - t.add(th(), th(), th()) + td( + [ + a( + fid, + href=f'/arkime/idark2dash/filter?start={timestampStartFilterStr}&stop={tomorrowStr}&field=event.id&value={fid}', + target="_blank", + ) + for fid in fids + ], + style="text-align: center", + ), + td( + ( + timestamp.strftime("%Y-%m-%d %H:%M:%S") + if timestamp + else timestampStr + ), + title=timestampStr, + style="text-align: center", + ), + ) + else: + # file name format was not recognized, so extra columns are empty + t.add(th(), th(), th()) except Exception as e: eprint(f'Error with file "{filename}": {e}') @@ -299,27 +309,28 @@ def do_GET(self): ) with div(cls="col-lg-6 h-100 text-center text-lg-end my-auto").add(ul(cls="list-inline mb-0")): - li(cls="list-inline-item").add(a(href=f'/', target="_blank")).add( - i(cls="bi bi-house fs-3", title="Malcolm") - ) - li(cls="list-inline-item").add(a(href=f'/readme/', target="_blank")).add( - i(cls="bi bi-question-circle fs-3", title="Documentation") - ) - li(cls="list-inline-item").add( - a( - 
href=f'/dashboards/app/dashboards#/view/9ee51f94-3316-4fc5-bd89-93a52af69714', - target="_blank", + if showMalcolmCols: + li(cls="list-inline-item").add(a(href=f'/', target="_blank")).add( + i(cls="bi bi-house fs-3", title="Malcolm") + ) + li(cls="list-inline-item").add(a(href=f'/readme/', target="_blank")).add( + i(cls="bi bi-question-circle fs-3", title="Documentation") + ) + li(cls="list-inline-item").add( + a( + href=f'/dashboards/app/dashboards#/view/9ee51f94-3316-4fc5-bd89-93a52af69714', + target="_blank", + ) + ).add(i(cls="bi bi-bar-chart-line fs-3", title="Dashboards")) + li(cls="list-inline-item").add(a(href=f'/arkime/sessions/', target="_blank")).add( + i(cls="bi bi-table fs-3", title="Arkime") ) - ).add(i(cls="bi bi-bar-chart-line fs-3", title="Dashboards")) - li(cls="list-inline-item").add(a(href=f'/arkime/sessions/', target="_blank")).add( - i(cls="bi bi-table fs-3", title="Arkime") - ) li(cls="list-inline-item").add( a(href=f'https://github.com/cisagov/Malcolm/', target="_blank") ).add(i(cls="bi-github fs-3", title="GitHub")) - script(type="text/javascript", src=f"{args.assetsDirRespReplacer}js/bootstrap.bundle.min.js") - script(type="text/javascript", src=f"{args.assetsDirRespReplacer}js/scripts.js") + script(type="text/javascript", src=f"{assetsDirRespReplacer}js/bootstrap.bundle.min.js") + script(type="text/javascript", src=f"{assetsDirRespReplacer}js/scripts.js") # send directory listing HTML to web client self.wfile.write(str.encode(str(doc))) @@ -417,18 +428,23 @@ def do_GET(self): ################################################################################################### # -class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): - def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler): - self.base_path = base_path - HTTPServer.__init__(self, server_address, RequestHandlerClass) - - -################################################################################################### -# -def serve_on_port(path: str, port: int): - server = ThreadingHTTPServer(path, ("", port)) - print(f"serving {path} at port {port}") - server.serve_forever() +def serve_on_port( + path, + port, + tls=False, + tls_key_file=None, + tls_cert_file=None, + server_class=ThreadingHTTPServer, + handler_class=HTTPHandler, +): + with pushd(path): + server = server_class(("", port), functools.partial(handler_class, directory=path)) + if tlsOk := (tls and os.path.isfile(str(tls_key_file)) and os.path.isfile(str(tls_cert_file))): + ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_SERVER) + ctx.load_cert_chain(certfile=tls_cert_file, keyfile=tls_key_file) + server.socket = ctx.wrap_socket(server.socket, server_side=True) + print(f"serving {path} at port {port}{' over TLS' if tlsOk else ''}") + server.serve_forever() ################################################################################################### @@ -442,6 +458,7 @@ def main(): defaultZip = os.getenv('EXTRACTED_FILE_HTTP_SERVER_ZIP', 'false') defaultRecursive = os.getenv('EXTRACTED_FILE_HTTP_SERVER_RECURSIVE', 'false') defaultMagic = os.getenv('EXTRACTED_FILE_HTTP_SERVER_MAGIC', 'false') + defaultTls = os.getenv('EXTRACTED_FILE_HTTP_SERVER_TLS', 'false') defaultLinks = os.getenv('EXTRACTED_FILE_HTTP_SERVER_LINKS', 'false') defaultMalcolm = os.getenv('EXTRACTED_FILE_HTTP_SERVER_MALCOLM', 'false') defaultPort = int(os.getenv('EXTRACTED_FILE_HTTP_SERVER_PORT', 8440)) @@ -474,6 +491,33 @@ def main(): type=int, default=defaultPort, ) + parser.add_argument( + '-t', + '--tls', + dest='tls', + type=str2bool, + 
nargs='?', + const=True, + default=defaultTls, + metavar='true|false', + help=f"Serve with TLS (must specify --tls-keyfile and --tls-certfile)", + ) + parser.add_argument( + '--tls-keyfile', + dest='tlsKeyFile', + help=f'TLS Key File', + metavar='', + type=str, + default=os.getenv('EXTRACTED_FILE_HTTP_SERVER_TLS_KEYFILE', None), + ) + parser.add_argument( + '--tls-certfile', + dest='tlsCertFile', + help=f'TLS Certificate File', + metavar='', + type=str, + default=os.getenv('EXTRACTED_FILE_HTTP_SERVER_TLS_CERTFILE', None), + ) parser.add_argument( '-d', '--directory', @@ -591,7 +635,13 @@ def main(): if args.assetsDirRespReplacer: args.assetsDirRespReplacer = os.path.join(args.assetsDirRespReplacer, '') - Thread(target=serve_on_port, args=[args.serveDir, args.port]).start() + serve_on_port( + path=args.serveDir, + port=args.port, + tls=args.tls, + tls_key_file=args.tlsKeyFile, + tls_cert_file=args.tlsCertFile, + ) ################################################################################################### diff --git a/shared/bin/prune_files.sh b/shared/bin/prune_files.sh index 9d4c63f99..0e94ee305 100755 --- a/shared/bin/prune_files.sh +++ b/shared/bin/prune_files.sh @@ -1,95 +1,115 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. # recursion depth (1 = not recursive) DEPTH=1 -# threshold is an integer percentage between 1-100; the script will prune until disk usage drops below the threshold -THRESHOLD=90 # defaults to "prune when usage >= 90%"; +# THRESHOLD_PCT and MAXSIZE_GB define pruning triggers; either or both may trigger pruning. +# A value of 0 means that trigger is unused +# If either trigger condition matches, the script will prune until disk usage drops **below** the thresholds + +# THRESHOLD_PCT is an integer percentage between 1-100 ("prune when disk usage >= THRESHOLD_PCT%") +THRESHOLD_PCT=0 +# MAXSIZE_GB is an integer representing gigabytes ("prune when path contents >= MAXSIZE_GB"), although +# it can be specified as a human-readable data size (e.g., 10G) if humanfriendly is available +MAXSIZE_GB=0 # if specified, this script will check and prune every $INTERVAL seconds INTERVAL=0 # defaults to "run once then exit" VERBOSE=0 # defaults to "not verbose" - -while getopts t:p:i:rv opts; do +while getopts i:m:p:rt:v opts; do case ${opts} in - p) PRUNE_PATH=${OPTARG} ;; - t) THRESHOLD=${OPTARG} ;; i) INTERVAL=${OPTARG} ;; + m) MAXSIZE_GB=${OPTARG} ;; + p) PRUNE_PATH=${OPTARG} ;; r) DEPTH=999 ;; + t) THRESHOLD_PCT=${OPTARG} ;; v) VERBOSE=1 ;; esac done INT_RE='^[0-9]+$' -if [ -z $PRUNE_PATH ] || [ ! -e "$PRUNE_PATH" ] || ! pushd >/dev/null 2>&1 $PRUNE_PATH ; then - echo "Please specify prune path with -p" +if [[ -z "$PRUNE_PATH" ]] || [[ ! -e "$PRUNE_PATH" ]] || ! pushd >/dev/null 2>&1 $PRUNE_PATH ; then + echo "Please specify prune path with -p" >&2 exit 1 fi -if [ -z $THRESHOLD ] || [[ ! "$THRESHOLD" =~ $INT_RE ]] || ! [ "$THRESHOLD" -ge 1 -a "$THRESHOLD" -le 100 ] ; then - echo "Please specify prune threshold (percentage, 1-100) with -t" +if [[ ! "$INTERVAL" =~ $INT_RE ]] || ! (( "$INTERVAL" >= 0 && "$INTERVAL" <= 86400 )) ; then + echo "Please specify prune check interval (seconds, 0-86400) with -i (0 = run once)" >&2 exit 1 fi -if [[ ! "$INTERVAL" =~ $INT_RE ]] || ! [ "$INTERVAL" -ge 0 -a "$INTERVAL" -le 86400 ] ; then - echo "Please specify prune check interval (seconds, 0-86400) with -i (0 = run once)" +if [[ ! 
"$MAXSIZE_GB" =~ $INT_RE ]] && command -v humanfriendly >/dev/null 2>&1; then + # convert max-size from a string (e.g., 1TB) to the number of gigabytes (1000) + MAXSIZE_BYTES="$(humanfriendly --parse-size "$MAXSIZE_GB" 2>/dev/null)" + if [[ -n "$MAXSIZE_BYTES" ]] && [[ "$MAXSIZE_BYTES" =~ $INT_RE ]]; then + MAXSIZE_GB="$(echo "$MAXSIZE_BYTES" | awk '{printf "%.0f\n", $1/1000/1000/1000}')" + fi +fi + +if ( [[ -z "$THRESHOLD_PCT" ]] || [[ ! "$THRESHOLD_PCT" =~ $INT_RE ]] || ! (( "$THRESHOLD_PCT" >= 1 && "$THRESHOLD_PCT" <= 100)) ) && + ( [[ -z "$MAXSIZE_GB" ]] || [[ ! "$MAXSIZE_GB" =~ $INT_RE ]] || ! (( "$MAXSIZE_GB" >= 1 )) ); then + echo "Please specify at least one prune trigger: threshold (percentage, 1-100) with -t; or, maximum size (gigabytes, >= 1) with -m" >&2 exit 1 fi while true ; do # check initial disk capacity - USAGE=$(df -k . | awk '{gsub("%",""); capacity=$5}; END {print capacity}') - if [ $USAGE -gt $THRESHOLD ] ; then + USAGE_PCT=$(df -k . 2>/dev/null | awk '{gsub("%",""); capacity=$5}; END {print capacity}') + USAGE_GB=$(du -sb . 2>/dev/null | awk '{printf "%.0f\n", $1/1000/1000/1000}') + if ( (( $THRESHOLD_PCT > 0 )) && (( $USAGE_PCT > $THRESHOLD_PCT )) ) || ( (( $MAXSIZE_GB > 0 )) && (( $USAGE_GB > $MAXSIZE_GB )) ); then # we have exceeded the threshold, see if there is something to prune - [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity, pruning..." + [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE_PCT% of capacity ($USAGE_GB GB), pruning..." >&2 # read files by modification time, oldest first, deleting until we've dropped below the threshold - DELETED=0 + DELETED_FILES=0 + DELETED_BYTES=0 while IFS='' read -r -d ' ' FILE_TIME && IFS='' read -r -d ' ' FILE_SIZE && IFS='' read -r -d '' FILE_TO_DELETE; do FILE_SIZE_HUMAN=$(numfmt --to=iec-i --suffix=B $FILE_SIZE) FILE_TIME_HUMAN=$(date -u -d @$FILE_TIME) - if [ -f "$FILE_TO_DELETE" ]; then + if [[ -f "$FILE_TO_DELETE" ]]; then if rm -f "$FILE_TO_DELETE" ; then - DELETED=$((DELETED+1)) + DELETED_FILES=$((DELETED_FILES+1)) + DELETED_BYTES=$((DELETED_BYTES+FILE_SIZE)) - echo "Pruned \"$FILE_TO_DELETE\" ($FILE_SIZE_HUMAN, $FILE_TIME_HUMAN)" + [[ "$VERBOSE" == "1" ]] && echo "Pruned \"$FILE_TO_DELETE\" ($FILE_SIZE_HUMAN, $FILE_TIME_HUMAN)" >&2 # re-check disk capacity - USAGE=$(df -k . | awk '{gsub("%",""); capacity=$5}; END {print capacity}') - if [ $USAGE -gt $THRESHOLD ] ; then + USAGE_PCT=$(df -k . 2>/dev/null | awk '{gsub("%",""); capacity=$5}; END {print capacity}') + USAGE_GB=$(du -sb . 2>/dev/null | awk '{printf "%.0f\n", $1/1000/1000/1000}') + if ( (( $THRESHOLD_PCT > 0 )) && (( $USAGE_PCT > $THRESHOLD_PCT )) ) || ( (( $MAXSIZE_GB > 0 )) && (( $USAGE_GB > $MAXSIZE_GB )) ); then # we still exceed the threshold, continue to loop - [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity, pruning..." + [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE_PCT% of capacity ($USAGE_GB GB), pruning..." >&2 else # we're below the limit, break - [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity" + [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE_PCT% of capacity ($USAGE_GB GB)" >&2 break fi fi # file was rm'ed fi # file exists - + # exclude files in our Zeek live capture directory done < <(find . -xdev -mindepth 1 -maxdepth $DEPTH -ignore_readdir_race -type f \( ! 
-path '*/spool/*' -o -path '*/spool/tmp*' \) -printf '%T@ %s %p\0' 2>/dev/null | sort -zn 2>/dev/null) - if [ $DELETED -gt 0 ] ; then - [[ "$VERBOSE" == "1" ]] && echo "Pruned $DELETED files in \"$PRUNE_PATH\"" + if (( $DELETED_FILES > 0 )) ; then + echo "Pruned $DELETED_FILES files ($(numfmt --to=iec-i --suffix=B $DELETED_BYTES)) in \"$PRUNE_PATH\"" else - echo "Nothing was pruned in \"$PRUNE_PATH\"!" + echo "Nothing was pruned in \"$PRUNE_PATH\"!" >&2 fi else - [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity" + [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE_PCT% of capacity ($USAGE_GB GB)" >&2 fi - if [ $INTERVAL -gt 0 ] ; then - sleep $INTERVAL + if (( $INTERVAL > 0 )) ; then + for i in $(seq 1 $INTERVAL); do sleep 1; done else break fi diff --git a/shared/bin/ufw_allow_requests.sh b/shared/bin/ufw_allow_requests.sh new file mode 100755 index 000000000..bec7776a0 --- /dev/null +++ b/shared/bin/ufw_allow_requests.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. + +# manage a UFW rule for allowing a remote Malcolm instance to connect to +# services hosted on the sensor + +# works with a comma-separated list of IP addresses in $MALCOLM_REQUEST_ACL, or +# if that variable is not set, a single IP address in $OS_HOST + +[[ "$(uname -s)" = 'Darwin' ]] && REALPATH=grealpath || REALPATH=realpath +[[ "$(uname -s)" = 'Darwin' ]] && DIRNAME=gdirname || DIRNAME=dirname +if ! (type "$REALPATH" && type "$DIRNAME") > /dev/null; then + echo "$(basename "${BASH_SOURCE[0]}") requires $REALPATH and $DIRNAME" + exit 1 +fi +export SCRIPT_PATH="$($DIRNAME $($REALPATH -e "${BASH_SOURCE[0]}"))" + +# control_vars.conf file must be specified as argument to script or be found in an expected place +# source configuration variables file if found (precedence: pwd, script directory, /opt/sensor/sensor_ctl) +if [[ -n "$1" ]]; then + source "$1" +else + CONTROL_VARS_FILE="control_vars.conf" + if [[ -r ./"$CONTROL_VARS_FILE" ]]; then + source ./"$CONTROL_VARS_FILE" + elif [[ -r "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" ]]; then + source "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" + elif [[ -r /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" ]]; then + source /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" + fi +fi + +if [[ -z $MALCOLM_REQUEST_PORTS ]] || ( [[ -z $MALCOLM_REQUEST_ACL ]] && [[ -z $OS_HOST ]] ); then + echo "Either the remote host (\$MALCOLM_REQUEST_ACL or \$OS_HOST) or the request ports (\$MALCOLM_REQUEST_PORTS) is undefined" + exit 1 +elif [[ ! 
-x /usr/sbin/ufw ]]; then + echo "/usr/sbin/ufw does not exist or is not executable" + exit 1 +fi + +while read SERVICE_PORT; do + + # delete previous UFW rule(s) + while read LINE; do + if [[ -n $LINE ]] && [[ "$LINE" =~ ^[0-9]+$ ]]; then + /usr/sbin/ufw --force delete $LINE + fi + done <<< "$(/usr/sbin/ufw status numbered | tac | grep "${SERVICE_PORT}/tcp" | sed "s/].*//" | sed "s/[^0-9]*//g")" + + # add new UFW rule(s) + if [[ -n $MALCOLM_REQUEST_ACL ]]; then + # loop over ACL IP addresses + IFS="," + for IP in $MALCOLM_REQUEST_ACL; do + /usr/sbin/ufw allow proto tcp from $IP to any port $SERVICE_PORT + done + unset IFS + elif [[ -n $OS_HOST ]]; then + # ACL not defined, create a rule for $OS_HOST + /usr/sbin/ufw allow proto tcp from $OS_HOST to any port $SERVICE_PORT + fi + + # output status of rule + /usr/sbin/ufw status | grep "${SERVICE_PORT}/tcp" + +done < <(echo "${MALCOLM_REQUEST_PORTS}" | tr ',' '\n') # loop over ',' separated MALCOLM_REQUEST_PORTS values \ No newline at end of file diff --git a/shared/bin/ufw_allow_viewer.sh b/shared/bin/ufw_allow_viewer.sh deleted file mode 100755 index 90c06fbaf..000000000 --- a/shared/bin/ufw_allow_viewer.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2024 Battelle Energy Alliance, LLC. All rights reserved. - -# manage a UFW rule for allowing a remote Arkime viewer instance (on the same host -# to which arkime's capture is forwarding session logs) to connect to and -# retrieve PCAP segments from the local Arkime viewer instance - -# works with a comma-separated list of IP addresses in $ARKIME_PACKET_ACL, or -# if that variable is not set, a single IP address in $OS_HOST - -[[ "$(uname -s)" = 'Darwin' ]] && REALPATH=grealpath || REALPATH=realpath -[[ "$(uname -s)" = 'Darwin' ]] && DIRNAME=gdirname || DIRNAME=dirname -if ! (type "$REALPATH" && type "$DIRNAME") > /dev/null; then - echo "$(basename "${BASH_SOURCE[0]}") requires $REALPATH and $DIRNAME" - exit 1 -fi -export SCRIPT_PATH="$($DIRNAME $($REALPATH -e "${BASH_SOURCE[0]}"))" - -# control_vars.conf file must be specified as argument to script or be found in an expected place -# source configuration variables file if found (precedence: pwd, script directory, /opt/sensor/sensor_ctl) -if [[ -n "$1" ]]; then - source "$1" -else - CONTROL_VARS_FILE="control_vars.conf" - if [[ -r ./"$CONTROL_VARS_FILE" ]]; then - source ./"$CONTROL_VARS_FILE" - elif [[ -r "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" ]]; then - source "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" - elif [[ -r /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" ]]; then - source /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" - fi -fi - -if [[ -z $ARKIME_VIEWER_PORT ]] || ( [[ -z $ARKIME_PACKET_ACL ]] && [[ -z $OS_HOST ]] ); then - echo "Either the remote Arkime viewer host (\$ARKIME_PACKET_ACL or \$OS_HOST) or the local Arkime viewer port (\$ARKIME_VIEWER_PORT) is undefined" - exit 1 -elif [[ ! 
-x /usr/sbin/ufw ]]; then
-  echo "/usr/sbin/ufw does not exist or is not executable"
-  exit 1
-fi
-
-# delete previous UFW rule(s)
-while read LINE; do
-  if [[ -n $LINE ]] && [[ "$LINE" =~ ^[0-9]+$ ]]; then
-    /usr/sbin/ufw --force delete $LINE
-  fi
-done <<< "$(/usr/sbin/ufw status numbered | tac | grep "${ARKIME_VIEWER_PORT}/tcp" | sed "s/].*//" | sed "s/[^0-9]*//g")"
-
-# add new UFW rule(s)
-if [[ -n $ARKIME_PACKET_ACL ]]; then
-  IFS=","
-  for IP in $ARKIME_PACKET_ACL; do
-    /usr/sbin/ufw allow proto tcp from $IP to any port $ARKIME_VIEWER_PORT
-  done
-  unset IFS
-elif [[ -n $OS_HOST ]]; then
-  /usr/sbin/ufw allow proto tcp from $OS_HOST to any port $ARKIME_VIEWER_PORT
-fi
-
-# output status of rule
-/usr/sbin/ufw status | grep "${ARKIME_VIEWER_PORT}/tcp"
diff --git a/shared/bin/web-ui-asset-download.sh b/shared/bin/web-ui-asset-download.sh
new file mode 100755
index 000000000..e8263d49b
--- /dev/null
+++ b/shared/bin/web-ui-asset-download.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+unset VERBOSE
+OUTPUT_DIR=/tmp
+
+while getopts o:v opts; do
+  case ${opts} in
+    o) OUTPUT_DIR=${OPTARG} ;;
+    v) VERBOSE=1 ;;
+  esac
+done
+
+set -e
+if [[ -n $VERBOSE ]]; then
+  set -x
+fi
+
+ASSETS=(
+  "https://fonts.gstatic.com/s/lato/v24/S6u_w4BMUTPHjxsI9w2_Gwfo.ttf|"
+  "https://fonts.gstatic.com/s/lato/v24/S6u8w4BMUTPHjxsAXC-v.ttf|"
+  "https://fonts.gstatic.com/s/lato/v24/S6u_w4BMUTPHjxsI5wq_Gwfo.ttf|"
+  "https://fonts.gstatic.com/s/lato/v24/S6u9w4BMUTPHh7USSwiPHA.ttf|"
+  "https://fonts.gstatic.com/s/lato/v24/S6uyw4BMUTPHjx4wWw.ttf|"
+  "https://fonts.gstatic.com/s/lato/v24/S6u9w4BMUTPHh6UVSwiPHA.ttf|"
+  "https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/fonts/bootstrap-icons.woff2?856008caa5eb66df68595e734e59580d|bootstrap-icons.woff2"
+  "https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/fonts/bootstrap-icons.woff?856008caa5eb66df68595e734e59580d|bootstrap-icons.woff"
+)
+
+mkdir -p "$OUTPUT_DIR"
+pushd "$OUTPUT_DIR" >/dev/null 2>&1
+for i in ${ASSETS[@]}; do
+  URL="$(echo "${i}" | cut -d'|' -f1)"
+  OUTPUT_FILE="$(echo "${i}" | cut -d'|' -f2)"
+  if [[ -n "${URL}" ]]; then
+    if [[ -n "${OUTPUT_FILE}" ]]; then
+      curl --fail --silent --show-error --output "${OUTPUT_FILE}" "${URL}"
+    else
+      curl --fail --silent --show-error --remote-header-name --remote-name "${URL}"
+    fi
+  fi
+done
+popd >/dev/null 2>&1
+
+
+if [[ -n $VERBOSE ]]; then
+  set +x
+fi
+set +e
diff --git a/shared/bin/zeek_intel_setup.sh b/shared/bin/zeek_intel_setup.sh
index fbc89ed79..8382f4eba 100755
--- a/shared/bin/zeek_intel_setup.sh
+++ b/shared/bin/zeek_intel_setup.sh
@@ -12,6 +12,7 @@ shopt -s nocasematch
 ENCODING="utf-8"
 SCRIPT_FILESPEC="$(realpath -e "${BASH_SOURCE[0]}")"
+SCRIPT_FILESPEC_ESCAPED="$(printf '%s\n' "${SCRIPT_FILESPEC}" | sed -e 's/[\/&]/\\&/g')"
 ZEEK_DIR=${ZEEK_DIR:-"/opt/zeek"}
 ZEEK_INTEL_ITEM_EXPIRATION=${ZEEK_INTEL_ITEM_EXPIRATION:-"-1min"}
 ZEEK_INTEL_FEED_SINCE=${ZEEK_INTEL_FEED_SINCE:-""}
@@ -21,10 +22,13 @@ INTEL_DIR=${INTEL_DIR:-"${ZEEK_DIR}/share/zeek/site/intel"}
 INTEL_PRESEED_DIR=${INTEL_PRESEED_DIR:-"${ZEEK_DIR}/share/zeek/site/intel-preseed"}
 THREAT_FEED_TO_ZEEK_SCRIPT=${THREAT_FEED_TO_ZEEK_SCRIPT:-"${ZEEK_DIR}/bin/zeek_intel_from_threat_feed.py"}
 LOCK_DIR="${INTEL_DIR}/lock"
+INSTANCE_UID="$(tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c 16; echo)"
+(( ${#INSTANCE_UID} == 16 )) || INSTANCE_UID=$RANDOM

 # make sure only one instance of the intel update runs at a time
 function finish {
-    rmdir -- "$LOCK_DIR" || echo "Failed to remove lock directory '$LOCK_DIR'" >&2
+    rmdir -- "${LOCK_DIR}" || echo "Failed to remove 
lock directory '${LOCK_DIR}'" >&2 + [[ -n "${INSTANCE_UID}" ]] && find "${INTEL_DIR}"/ -type f -name "*${INSTANCE_UID}*" -delete } mkdir -p -- "$(dirname "$LOCK_DIR")" @@ -50,7 +54,7 @@ if mkdir -- "$LOCK_DIR" 2>/dev/null; then if [[ -d "${INTEL_DIR}" ]] && (( $(find "${INTEL_DIR}" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l) > 0 )); then pushd "${INTEL_DIR}" >/dev/null 2>&1 - cat > ./__load__.zeek.new << EOF + cat > ./__load__.zeek."${INSTANCE_UID}" << EOF # WARNING: This file is automatically generated. # Do not make direct modifications here. @load policy/integration/collective-intel @@ -81,7 +85,7 @@ EOF elif [[ -f "${DIR}"/__load__.zeek ]]; then # this intel feed has its own load directive and should take care of itself - echo "@load ${DIR}" >> ./__load__.zeek.new + echo "@load ${DIR}" >> ./__load__.zeek."${INSTANCE_UID}" else # this directory contains "loose" intel files we'll need to load explicitly while IFS= read -r line; do @@ -96,10 +100,10 @@ EOF --ssl-verify ${ZEEK_INTEL_FEED_SSL_CERTIFICATE_VERIFICATION} \ --since "${ZEEK_INTEL_FEED_SINCE}" \ --threads ${ZEEK_INTEL_REFRESH_THREADS} \ - --output ./.threat_autogen.zeek.new \ + --output ./.threat_autogen.zeek."${INSTANCE_UID}" \ --input "${THREAT_JSON_FILES[@]}" \ --input-file ./STIX/.stix_input.txt ./MISP/.misp_input.txt - mv --backup=simple --suffix=.old ./.threat_autogen.zeek.new ./.threat_autogen.zeek + mv --backup=simple --suffix=.old ./.threat_autogen.zeek."${INSTANCE_UID}" ./.threat_autogen.zeek rm -f ./.threat_autogen.zeek.old LOOSE_INTEL_FILES+=( "${INTEL_DIR}"/.threat_autogen.zeek ) else @@ -108,15 +112,15 @@ EOF # explicitly load all of the "loose" intel files in other subdirectories that didn't __load__ themselves if (( ${#LOOSE_INTEL_FILES[@]} )); then - echo >> ./__load__.zeek.new - echo 'redef Intel::read_files += {' >> ./__load__.zeek.new + echo >> ./__load__.zeek."${INSTANCE_UID}" + echo 'redef Intel::read_files += {' >> ./__load__.zeek."${INSTANCE_UID}" for INTEL_FILE in "${LOOSE_INTEL_FILES[@]}"; do - echo " \"${INTEL_FILE}\"," >> ./__load__.zeek.new + echo " \"${INTEL_FILE}\"," >> ./__load__.zeek."${INSTANCE_UID}" done - echo '};' >> ./__load__.zeek.new + echo '};' >> ./__load__.zeek."${INSTANCE_UID}" fi - mv --backup=simple --suffix=.old ./__load__.zeek.new ./__load__.zeek + mv --backup=simple --suffix=.old ./__load__.zeek."${INSTANCE_UID}" ./__load__.zeek rm -f ./__load__.zeek.old popd >/dev/null 2>&1 @@ -130,11 +134,16 @@ fi # singleton lock check # write a cron entry to $SUPERCRONIC_CRONTAB using the interval specified in # $ZEEK_INTEL_REFRESH_CRON_EXPRESSION (e.g., 15 1 * * *) to execute this script set +u +if [[ -z "${SUPERCRONIC_CRONTAB}" ]] && \ + [[ -n "${SUPERVISOR_PATH}" ]] && \ + [[ -d "${SUPERVISOR_PATH}"/supercronic ]]; then + SUPERCRONIC_CRONTAB="${SUPERVISOR_PATH}"/supercronic/crontab + touch "${SUPERCRONIC_CRONTAB}" 2>/dev/null || true +fi if [[ -n "${SUPERCRONIC_CRONTAB}" ]] && [[ -f "${SUPERCRONIC_CRONTAB}" ]]; then + sed -i -e "/${SCRIPT_FILESPEC_ESCAPED}/d" "${SUPERCRONIC_CRONTAB}" if [[ -n "${ZEEK_INTEL_REFRESH_CRON_EXPRESSION}" ]]; then - echo "${ZEEK_INTEL_REFRESH_CRON_EXPRESSION} ${SCRIPT_FILESPEC} true" > "${SUPERCRONIC_CRONTAB}" - else - > "${SUPERCRONIC_CRONTAB}" + echo "${ZEEK_INTEL_REFRESH_CRON_EXPRESSION} ${SCRIPT_FILESPEC} true" >> "${SUPERCRONIC_CRONTAB}" fi # reload supercronic if it's running killall -s USR2 supercronic >/dev/null 2>&1 || true diff --git a/shared/bin/zeekdeploy.sh b/shared/bin/zeekdeploy.sh index 422d94549..7f0d83f7d 100755 --- 
a/shared/bin/zeekdeploy.sh +++ b/shared/bin/zeekdeploy.sh @@ -67,6 +67,7 @@ fi [[ -z $WORKER_LB_PROCS ]] && WORKER_LB_PROCS="$ZEEK_LB_PROCS" [[ -z $ZEEK_LB_METHOD ]] && ZEEK_LB_METHOD="custom" [[ -z $ZEEK_AF_PACKET_BUFFER_SIZE ]] && ZEEK_AF_PACKET_BUFFER_SIZE="$(echo "64*1024*1024" | bc)" +[[ -z $ZEEK_INTEL_REFRESH_ON_DEPLOY ]] && ZEEK_INTEL_REFRESH_ON_DEPLOY="true" # if zeek log path is unspecified, write logs to pwd [[ -z $ZEEK_LOG_PATH ]] && ZEEK_LOG_PATH=. @@ -89,16 +90,18 @@ ZEEK_EXTRACTOR_SCRIPT="$ZEEK_INSTALL_PATH"/share/zeek/site/"$EXTRACTOR_ZEEK_SCRI [[ -n "$ZEEK_INTEL_PATH" ]] && INTEL_DIR="$ZEEK_INTEL_PATH" || INTEL_DIR=/opt/sensor/sensor_ctl/zeek/intel export INTEL_DIR mkdir -p "$INTEL_DIR"/STIX "$INTEL_DIR"/MISP -touch "$INTEL_DIR"/__load__.zeek || true +touch "$INTEL_DIR"/__load__.zeek 2>/dev/null || true # autoconfigure load directives for intel files -[[ -x "$ZEEK_INSTALL_PATH"/bin/zeek_intel_setup.sh ]] && "$ZEEK_INSTALL_PATH"/bin/zeek_intel_setup.sh /bin/true +[[ -x "$ZEEK_INSTALL_PATH"/bin/zeek_intel_setup.sh ]] && \ + [[ "$ZEEK_INTEL_REFRESH_ON_DEPLOY" == "true" ]] && \ + "$ZEEK_INSTALL_PATH"/bin/zeek_intel_setup.sh /bin/true INTEL_UPDATE_TIME_PREV=0 # make sure "custom" directory exists, even if empty [[ -n "$ZEEK_CUSTOM_PATH" ]] && CUSTOM_DIR="$ZEEK_CUSTOM_PATH" || CUSTOM_DIR=/opt/sensor/sensor_ctl/zeek/custom export CUSTOM_DIR mkdir -p "$CUSTOM_DIR" -touch "$CUSTOM_DIR"/__load__.zeek || true +touch "$CUSTOM_DIR"/__load__.zeek 2>/dev/null || true # configure zeek cfg files pushd "$ZEEK_INSTALL_PATH"/etc >/dev/null 2>&1 @@ -262,8 +265,8 @@ while [ $("$ZEEK_CTL" status | tail -n +2 | grep -P "localhost\s+running\s+\d+" # check to see if intel feeds were updated, and if so, restart INTEL_UPDATE_TIME="$(stat -c %Y "$INTEL_DIR"/__load__.zeek 2>/dev/null || echo '0')" if (( $INTEL_UPDATE_TIME > $INTEL_UPDATE_TIME_PREV )); then - echo "Restarting via \"$ZEEK_CTL\" after intel update..." >&2 - "$ZEEK_CTL" restart + echo "Redeploying via \"$ZEEK_CTL deploy\" after intel update..." 
>&2 + "$ZEEK_CTL" deploy INTEL_UPDATE_TIME_PREV="$INTEL_UPDATE_TIME" fi diff --git a/zeek/scripts/docker_entrypoint.sh b/zeek/scripts/docker_entrypoint.sh index 0ed78c9b0..34aa5f82b 100755 --- a/zeek/scripts/docker_entrypoint.sh +++ b/zeek/scripts/docker_entrypoint.sh @@ -3,11 +3,11 @@ ZEEK_DIR=${ZEEK_DIR:-"/opt/zeek"} # ensure capabilities for capture -setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' "${ZEEK_DIR}"/bin/zeek || true -setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' "${ZEEK_DIR}"/bin/capstats || true +setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' "${ZEEK_DIR}"/bin/zeek 2>/dev/null || true +setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' "${ZEEK_DIR}"/bin/capstats 2>/dev/null || true -if [[ "${ZEEK_LIVE_CAPTURE:-false}" != "true" ]] && [[ -x "${ZEEK_DIR}"/bin/zeek_intel_setup.sh ]]; then - sleep 15 # give the "live" instance, if there is one, a chance to go first +if [[ "${ZEEK_INTEL_REFRESH_ON_ENTRYPOINT:-false}" == "true" ]] && \ + [[ -x "${ZEEK_DIR}"/bin/zeek_intel_setup.sh ]]; then if [[ "$(id -u)" == "0" ]] && [[ -n "$PUSER" ]]; then su -s /bin/bash -p ${PUSER} << EOF "${ZEEK_DIR}"/bin/zeek_intel_setup.sh /bin/true diff --git a/zeek/supervisord.conf b/zeek/supervisord.conf index 5ad1d49b7..89d94aa61 100644 --- a/zeek/supervisord.conf +++ b/zeek/supervisord.conf @@ -43,8 +43,8 @@ user=%(ENV_PUSER)s [program:cron] command=/usr/local/bin/supercronic -json "%(ENV_SUPERCRONIC_CRONTAB)s" -autostart=%(ENV_ZEEK_CRON)s -autorestart=%(ENV_ZEEK_CRON)s +autostart=true +autorestart=true stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1