From b630e5454d5338e8996c40cb268ce968d372727c Mon Sep 17 00:00:00 2001 From: Yaroslav Svitlytskyi Date: Sat, 15 Mar 2025 00:14:55 +0100 Subject: [PATCH 1/2] feature: modified chimney deployments to work with kms mode --- chimey-disable-kms.sh | 0 chimey-enable-kms.sh | 96 +++++++++++++++++++++++++++++++++++++++++++ chimney.sh | 27 +++++++++++- 3 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 chimey-disable-kms.sh create mode 100644 chimey-enable-kms.sh diff --git a/chimey-disable-kms.sh b/chimey-disable-kms.sh new file mode 100644 index 0000000..e69de29 diff --git a/chimey-enable-kms.sh b/chimey-enable-kms.sh new file mode 100644 index 0000000..073dea3 --- /dev/null +++ b/chimey-enable-kms.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +if [ "$(id -u)" -ne 0 ]; then + echo "This script requires sudo privileges. Please enter your password:" + exec sudo "$0" "$@" # This re-executes the script with sudo +fi + +# setup variables +export SCRIPT_EXPIRATION=scriptexpiration +export JWT_TOKEN=jwttoken + +export DEBIAN_FRONTEND=noninteractive + +sudo apt update + +if dpkg --get-selections | grep -q "unattended-upgrades"; then + echo "unattended-upgrades is installed. removing it" + sudo apt-get remove -y --purge unattended-upgrades +else + echo "unattended-upgrades is not installed. Nothing to do." +fi + +install_tools_utilities() { + REQUIRED_PKG=$1 + PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG | grep "install ok installed") + echo -e "\e[37mChecking for $REQUIRED_PKG if it is already installed. \e[73m" + if [ "" = "$PKG_OK" ]; then + echo -e "\e[31m No $REQUIRED_PKG is found on the server. \e[13m\e[32m$REQUIRED_PKG installed. \e[23m \n" + sudo apt --yes install $REQUIRED_PKG &>/dev/null + else + echo -e "\e[32m $REQUIRED_PKG is already installed on the server/machine. \e[23m \n" + fi +} + +check_port_443() { + PORT=443 + command -v netstat >/dev/null 2>&1 || { + echo >&2 "netstat command not found. Exiting." 
+ exit 1 + } + + if netstat -tulpn | grep ":$PORT" >/dev/null; then + echo "Port $PORT is in use." + echo "Please stop the process running on port $PORT and run the script again" + exit 1 + else + echo "Port $PORT is not in use." + fi +} + +install_tools_utilities unzip +install_tools_utilities curl +install_tools_utilities containerd +install_tools_utilities docker.io +install_tools_utilities systemd +install_tools_utilities "systemd-timesyncd" +install_tools_utilities ufw +install_tools_utilities ntp +install_tools_utilities ntpdate +install_tools_utilities net-tools +install_tools_utilities python3 +install_tools_utilities jq + +#Setting latest docker image wrt latest release +export DOCKER_IMAGE=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/blobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) +export DOCKER_IMAGE_EBLOBBER=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/eblobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) + +sudo ufw allow 123/udp +sudo ufw allow out to any port 123 +sudo systemctl stop ntp +sudo ntpdate pool.ntp.org +sudo systemctl start ntp +sudo systemctl enable ntp +sudo ufw allow 22,80,443,53/tcp +sudo ufw allow out to any port 80 +sudo ufw allow out to any port 443 +sudo ufw allow out to any port 53 + +echo "checking if ports are available..." +check_port_443 + + +echo "Blobber KMS has been enabled." + +# TODO: replace command in docker compose file. + +# TODO: there will be three states +# 1. Blobber is deployed with KMS enabled parameter and then delegate wallet gets registered to KMS and that split key is set for operational wallet for deployed blobber +# 2. User wants to set a different split key for running container, but this container should not be rented. For rented blobbers it won't work +# 3. User wants to deactivate KMS. 
It would require user to provide new operational wallet on UI. + + +* --keys_file_is_split +* --keys_file_public_key +* --keys_file_private_key +* --keys_file_client_key \ No newline at end of file diff --git a/chimney.sh b/chimney.sh index 9bcc0cb..5ddbf17 100644 --- a/chimney.sh +++ b/chimney.sh @@ -20,6 +20,13 @@ export PROJECT_ROOT=/var/0chain/blobber export BLOCK_WORKER_URL=0chainblockworker export BLOBBER_HOST=0chainblobberhost export IS_ENTERPRISE=isenterprise +export IS_KMS_ENABLED=iskmsenabled +export KMS_PUBLIC_KEY=kmspublickey +export KMS_PRIVATE_KEY=kmsprivatekey +export KMS_CLIENT_KEY=kmsclientkey + +# TODO: how to make firebase token persistent, because if user doesn't use his script for some time, then firebase token would expire +# TODO: probably do some expiration time for the script. # export VALIDATOR_WALLET_ID=0chainvalwalletid # export VALIDATOR_WALLET_PUBLIC_KEY=0chainvalwalletpublickey @@ -166,6 +173,18 @@ pushd ${PROJECT_ROOT} > /dev/null; ./bin/zwallet create-wallet --wallet vald_op_wallet.json --configDir . --config config.yaml --silent popd > /dev/null; + # sed -i "s/validator:${DOCKER_IMAGE}/evalidator:${DOCKER_IMAGE_EBLOBBER}/g" ${PROJECT_ROOT}/docker-compose.yml + # sed -i "s/blobber:${DOCKER_IMAGE}/eblobber:${DOCKER_IMAGE_EBLOBBER}/g" ${PROJECT_ROOT}/docker-compose.yml + +if [ "$IS_KMS_ENABLED" = true ]; then + echo -e "\n\e[93m=============================================================================================================================================================================== + Saving blobber/validator Operational wallets to KMS. + =============================================================================================================================================================================== \e[39m" + + # TODO: how to create JWT token from script? 
CREATE SIGNATURE ON FE AND PASS IT TO THE SCRIPT + # TODO: or create JWT token on FE and pass it to the user +fi + #### ---- Start Blobber Setup ----- #### FOLDERS_TO_CREATE="config sql bin monitoringconfig keys_config" @@ -322,6 +341,12 @@ ${BLOBBER_HOST} { EOF +if [ "$IS_KMS_ENABLED" = true ]; then + KMS_COMMANDS="--keys_file_is_split --keys_file_public_key ${KMS_PUBLIC_KEY} --keys_file_private_key ${KMS_PRIVATE_KEY} --keys_file_client_key ${KMS_CLIENT_KEY}" +else + KMS_COMMANDS="" +fi + ### docker-compose.yaml echo "creating docker-compose file" cat <<EOF >${PROJECT_ROOT}/docker-compose.yml @@ -384,7 +409,7 @@ services: - ${PROJECT_ROOT}/keys_config:/blobber/keysconfig # keys and minio config - ${PROJECT_ROOT_HDD}/data/tmp:/tmp - ${PROJECT_ROOT}/sql:/blobber/sql - command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} + command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} ${KMS_COMMANDS} networks: default: restart: "always" From 33c61b695b443aca738737e84056b9d226d78f3f Mon Sep 17 00:00:00 2001 From: Yaroslav Svitlytskyi Date: Sat, 15 Mar 2025 09:58:51 +0100 Subject: [PATCH 2/2] feature: added kms enable and disable scripts --- chimey-disable-kms.sh | 203 +++++++++++++++++++++++++++++++ chimey-enable-kms.sh | 269 +++++++++++++++++++++++++++++------------- chimney.sh | 15 +-- 3 files changed, 395 insertions(+), 92 deletions(-) diff --git a/chimey-disable-kms.sh b/chimey-disable-kms.sh index e69de29..528d66a 100644 --- a/chimey-disable-kms.sh +++ b/chimey-disable-kms.sh @@ -0,0 +1,203 @@ +#!/bin/bash + +if [ "$(id -u)" -ne 0 ]; then + echo "This script requires sudo privileges.
Please enter your password:" + exec sudo "$0" "$@" # This re-executes the script with sudo +fi + +# setup variables +export DEBIAN_FRONTEND=noninteractive + +export PROJECT_ROOT=/var/0chain/blobber +export BLOBBER_HOST=0chainblobberhost +export PROJECT_ROOT_SSD=/var/0chain/blobber/ssd +export PROJECT_ROOT_HDD=/var/0chain/blobber/hdd + +export BRANCH_NAME=main + +#Setting latest docker image wrt latest release +export DOCKER_IMAGE=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/blobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) +export DOCKER_IMAGE_EBLOBBER=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/eblobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) + +### docker-compose.yaml +echo "creating docker-compose file" +cat <<EOF >${PROJECT_ROOT}/docker-compose.yml +--- +version: "3" +services: + postgres: + image: postgres:14 + environment: + POSTGRES_HOST_AUTH_METHOD: trust + POSTGRES_USER: blobber_user + POSTGRES_DB: blobber_meta + POSTGRES_PASSWORD: blobber + SLOW_TABLESPACE_PATH: /var/lib/postgresql/hdd + SLOW_TABLESPACE: hdd_tablespace + volumes: + - ${PROJECT_ROOT_SSD}/data/postgresql:/var/lib/postgresql/data + - ${PROJECT_ROOT_HDD}/pg_hdd_data:/var/lib/postgresql/hdd + - ${PROJECT_ROOT}/postgresql.conf:/var/lib/postgresql/postgresql.conf + - ${PROJECT_ROOT}/sql_init:/docker-entrypoint-initdb.d + command: postgres -c config_file=/var/lib/postgresql/postgresql.conf + networks: + default: + restart: "always" + + validator: + image: 0chaindev/validator:${DOCKER_IMAGE} + environment: + - DOCKER= true + volumes: + - ${PROJECT_ROOT}/config:/validator/config + - ${PROJECT_ROOT_HDD}/data:/validator/data + - ${PROJECT_ROOT_HDD}/log:/validator/log + - ${PROJECT_ROOT}/keys_config:/validator/keysconfig + command: ./bin/validator --port 5061 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0vnode01_keys.txt --log_dir /validator/log
--hosturl https://${BLOBBER_HOST}/validator + networks: + default: + restart: "always" + + blobber: + image: 0chaindev/blobber:${DOCKER_IMAGE} + environment: + DOCKER: "true" + DB_NAME: blobber_meta + DB_USER: blobber_user + DB_PASSWORD: blobber + DB_PORT: "5432" + DB_HOST: postgres + depends_on: + - validator + links: + - validator:validator + volumes: + - ${PROJECT_ROOT}/config:/blobber/config + - ${PROJECT_ROOT_HDD}/files:/blobber/files + - ${PROJECT_ROOT_HDD}/data:/blobber/data + - ${PROJECT_ROOT_HDD}/log:/blobber/log + - ${PROJECT_ROOT_SSD}/data/pebble/data:/pebble/data + - ${PROJECT_ROOT_SSD}/data/pebble/wal:/pebble/wal + - ${PROJECT_ROOT}/keys_config:/blobber/keysconfig # keys and minio config + - ${PROJECT_ROOT_HDD}/data/tmp:/tmp + - ${PROJECT_ROOT}/sql:/blobber/sql + command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} + networks: + default: + restart: "always" + + caddy: + image: caddy:2.6.4 + ports: + - "80:80" + - "443:443" + - "443:443/udp" + volumes: + - ${PROJECT_ROOT}/Caddyfile:/etc/caddy/Caddyfile + - ${PROJECT_ROOT}/site:/srv + - ${PROJECT_ROOT}/caddy_data:/data + - ${PROJECT_ROOT}/caddy_config:/config + restart: "always" + + promtail: + image: grafana/promtail:2.8.2 + volumes: + - ${PROJECT_ROOT_HDD}/log/:/logs + - ${PROJECT_ROOT}/monitoringconfig/promtail-config.yaml:/mnt/config/promtail-config.yaml + command: -config.file=/mnt/config/promtail-config.yaml + restart: "always" + + loki: + image: grafana/loki:2.8.2 + user: "1001" + volumes: + - ${PROJECT_ROOT}/monitoringconfig/loki-config.yaml:/mnt/config/loki-config.yaml + - ${PROJECT_ROOT_HDD}/loki:/data + - ${PROJECT_ROOT_HDD}/loki/rules:/etc/loki/rules + command: -config.file=/mnt/config/loki-config.yaml + restart: "always" + + prometheus: + image: prom/prometheus:v2.44.0 + user: root + volumes: + - 
${PROJECT_ROOT}/monitoringconfig/prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus_data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + restart: "always" + depends_on: + - cadvisor + + cadvisor: + image: wywywywy/docker_stats_exporter:20220516 + container_name: cadvisor + volumes: + - /var/run/docker.sock:/var/run/docker.sock + restart: "always" + + node-exporter: + image: prom/node-exporter:v1.5.0 + container_name: node-exporter + restart: unless-stopped + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + command: + - '--path.procfs=/host/proc' + - '--path.rootfs=/rootfs' + - '--path.sysfs=/host/sys' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)(\$\$|/)' + restart: "always" + + grafana: + image: grafana/grafana:9.5.2 + environment: + GF_SERVER_ROOT_URL: "https://${BLOBBER_HOST}/grafana" + GF_SECURITY_ADMIN_USER: "${GF_ADMIN_USER}" + GF_SECURITY_ADMIN_PASSWORD: "${GF_ADMIN_PASSWORD}" + GF_AUTH_ANONYMOUS_ENABLED: "true" + GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" + volumes: + - ${PROJECT_ROOT}/monitoringconfig/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yaml + - grafana_data:/var/lib/grafana + restart: "always" + + monitoringapi: + image: 0chaindev/monitoringapi:latest + restart: "always" + + agent: + image: portainer/agent:2.18.2-alpine + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + + portainer: + image: portainer/portainer-ce:2.18.2-alpine + command: '-H tcp://agent:9001 --tlsskipverify --admin-password-file /tmp/portainer_password' + depends_on: + - agent + links: + - agent:agent + volumes: + - portainer_data:/data + - /tmp/portainer_password:/tmp/portainer_password + restart: "always" + +networks: + default: + driver: bridge + +volumes: + grafana_data: + prometheus_data: + portainer_data: + +EOF + +/usr/local/bin/docker-compose -f 
${PROJECT_ROOT}/docker-compose.yml down +/usr/local/bin/docker-compose -f ${PROJECT_ROOT}/docker-compose.yml pull +/usr/local/bin/docker-compose -f ${PROJECT_ROOT}/docker-compose.yml up -d + +echo "Blobber KMS mode is disabled." diff --git a/chimey-enable-kms.sh b/chimey-enable-kms.sh index 073dea3..f505925 100644 --- a/chimey-enable-kms.sh +++ b/chimey-enable-kms.sh @@ -6,91 +6,202 @@ if [ "$(id -u)" -ne 0 ]; then fi # setup variables -export SCRIPT_EXPIRATION=scriptexpiration -export JWT_TOKEN=jwttoken +export KMS_PUBLIC_KEY=kmspublickey +export KMS_PRIVATE_KEY=kmsprivatekey +export KMS_CLIENT_KEY=kmsclientkey export DEBIAN_FRONTEND=noninteractive -sudo apt update +export PROJECT_ROOT=/var/0chain/blobber +export BLOBBER_HOST=0chainblobberhost +export PROJECT_ROOT_SSD=/var/0chain/blobber/ssd +export PROJECT_ROOT_HDD=/var/0chain/blobber/hdd -if dpkg --get-selections | grep -q "unattended-upgrades"; then - echo "unattended-upgrades is installed. removing it" - sudo apt-get remove -y --purge unattended-upgrades -else - echo "unattended-upgrades is not installed. Nothing to do." -fi - -install_tools_utilities() { - REQUIRED_PKG=$1 - PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG | grep "install ok installed") - echo -e "\e[37mChecking for $REQUIRED_PKG if it is already installed. \e[73m" - if [ "" = "$PKG_OK" ]; then - echo -e "\e[31m No $REQUIRED_PKG is found on the server. \e[13m\e[32m$REQUIRED_PKG installed. \e[23m \n" - sudo apt --yes install $REQUIRED_PKG &>/dev/null - else - echo -e "\e[32m $REQUIRED_PKG is already installed on the server/machine. \e[23m \n" - fi -} - -check_port_443() { - PORT=443 - command -v netstat >/dev/null 2>&1 || { - echo >&2 "netstat command not found. Exiting." - exit 1 - } - - if netstat -tulpn | grep ":$PORT" >/dev/null; then - echo "Port $PORT is in use." - echo "Please stop the process running on port $PORT and run the script again" - exit 1 - else - echo "Port $PORT is not in use."
- fi -} - -install_tools_utilities unzip -install_tools_utilities curl -install_tools_utilities containerd -install_tools_utilities docker.io -install_tools_utilities systemd -install_tools_utilities "systemd-timesyncd" -install_tools_utilities ufw -install_tools_utilities ntp -install_tools_utilities ntpdate -install_tools_utilities net-tools -install_tools_utilities python3 -install_tools_utilities jq +export BRANCH_NAME=main #Setting latest docker image wrt latest release export DOCKER_IMAGE=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/blobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) export DOCKER_IMAGE_EBLOBBER=$(curl -s https://registry.hub.docker.com/v2/repositories/0chaindev/eblobber/tags?page_size=100 | jq -r '.results[] | select(.name | test("^v[0-9]+\\.[0-9]+\\.[0-9]+$")) | .name' | sort -V | tail -n 1) -sudo ufw allow 123/udp -sudo ufw allow out to any port 123 -sudo systemctl stop ntp -sudo ntpdate pool.ntp.org -sudo systemctl start ntp -sudo systemctl enable ntp -sudo ufw allow 22,80,443,53/tcp -sudo ufw allow out to any port 80 -sudo ufw allow out to any port 443 -sudo ufw allow out to any port 53 - -echo "checking if ports are available..." -check_port_443 - - -echo "Blobber KMS has been enabled." - -# TODO: replace command in docker compose file. - -# TODO: there will be three states -# 1. Blobber is deployed with KMS enabled parameter and then delegate wallet gets registered to KMS and that split key is set for operational wallet for deployed blobber -# 2. User wants to set a different split key for running container, but this container should not be rented. For rented blobbers it won't work -# 3. User wants to deactivate KMS. It would require user to provide new operational wallet on UI. 
- - -* --keys_file_is_split +* --keys_file_public_key +* --keys_file_private_key +* --keys_file_client_key \ No newline at end of file +### docker-compose.yaml +echo "creating docker-compose file" +cat <<EOF >${PROJECT_ROOT}/docker-compose.yml +--- +version: "3" +services: + postgres: + image: postgres:14 + environment: + POSTGRES_HOST_AUTH_METHOD: trust + POSTGRES_USER: blobber_user + POSTGRES_DB: blobber_meta + POSTGRES_PASSWORD: blobber + SLOW_TABLESPACE_PATH: /var/lib/postgresql/hdd + SLOW_TABLESPACE: hdd_tablespace + volumes: + - ${PROJECT_ROOT_SSD}/data/postgresql:/var/lib/postgresql/data + - ${PROJECT_ROOT_HDD}/pg_hdd_data:/var/lib/postgresql/hdd + - ${PROJECT_ROOT}/postgresql.conf:/var/lib/postgresql/postgresql.conf + - ${PROJECT_ROOT}/sql_init:/docker-entrypoint-initdb.d + command: postgres -c config_file=/var/lib/postgresql/postgresql.conf + networks: + default: + restart: "always" + + validator: + image: 0chaindev/validator:${DOCKER_IMAGE} + environment: + - DOCKER= true + volumes: + - ${PROJECT_ROOT}/config:/validator/config + - ${PROJECT_ROOT_HDD}/data:/validator/data + - ${PROJECT_ROOT_HDD}/log:/validator/log + - ${PROJECT_ROOT}/keys_config:/validator/keysconfig + command: ./bin/validator --port 5061 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0vnode01_keys.txt --log_dir /validator/log --hosturl https://${BLOBBER_HOST}/validator + networks: + default: + restart: "always" + + blobber: + image: 0chaindev/blobber:${DOCKER_IMAGE} + environment: + DOCKER: "true" + DB_NAME: blobber_meta + DB_USER: blobber_user + DB_PASSWORD: blobber + DB_PORT: "5432" + DB_HOST: postgres + depends_on: + - validator + links: + - validator:validator + volumes: + - ${PROJECT_ROOT}/config:/blobber/config + - ${PROJECT_ROOT_HDD}/files:/blobber/files + - ${PROJECT_ROOT_HDD}/data:/blobber/data + - ${PROJECT_ROOT_HDD}/log:/blobber/log + - ${PROJECT_ROOT_SSD}/data/pebble/data:/pebble/data + - ${PROJECT_ROOT_SSD}/data/pebble/wal:/pebble/wal + -
${PROJECT_ROOT}/keys_config:/blobber/keysconfig # keys and minio config + - ${PROJECT_ROOT_HDD}/data/tmp:/tmp + - ${PROJECT_ROOT}/sql:/blobber/sql + command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} --keys_file_is_split --keys_file_public_key ${KMS_PUBLIC_KEY} --keys_file_private_key ${KMS_PRIVATE_KEY} --keys_file_client_key ${KMS_CLIENT_KEY} + networks: + default: + restart: "always" + + caddy: + image: caddy:2.6.4 + ports: + - "80:80" + - "443:443" + - "443:443/udp" + volumes: + - ${PROJECT_ROOT}/Caddyfile:/etc/caddy/Caddyfile + - ${PROJECT_ROOT}/site:/srv + - ${PROJECT_ROOT}/caddy_data:/data + - ${PROJECT_ROOT}/caddy_config:/config + restart: "always" + + promtail: + image: grafana/promtail:2.8.2 + volumes: + - ${PROJECT_ROOT_HDD}/log/:/logs + - ${PROJECT_ROOT}/monitoringconfig/promtail-config.yaml:/mnt/config/promtail-config.yaml + command: -config.file=/mnt/config/promtail-config.yaml + restart: "always" + + loki: + image: grafana/loki:2.8.2 + user: "1001" + volumes: + - ${PROJECT_ROOT}/monitoringconfig/loki-config.yaml:/mnt/config/loki-config.yaml + - ${PROJECT_ROOT_HDD}/loki:/data + - ${PROJECT_ROOT_HDD}/loki/rules:/etc/loki/rules + command: -config.file=/mnt/config/loki-config.yaml + restart: "always" + + prometheus: + image: prom/prometheus:v2.44.0 + user: root + volumes: + - ${PROJECT_ROOT}/monitoringconfig/prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus_data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + restart: "always" + depends_on: + - cadvisor + + cadvisor: + image: wywywywy/docker_stats_exporter:20220516 + container_name: cadvisor + volumes: + - /var/run/docker.sock:/var/run/docker.sock + restart: "always" + + node-exporter: + image: prom/node-exporter:v1.5.0 + 
container_name: node-exporter + restart: unless-stopped + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + command: + - '--path.procfs=/host/proc' + - '--path.rootfs=/rootfs' + - '--path.sysfs=/host/sys' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)(\$\$|/)' + restart: "always" + + grafana: + image: grafana/grafana:9.5.2 + environment: + GF_SERVER_ROOT_URL: "https://${BLOBBER_HOST}/grafana" + GF_SECURITY_ADMIN_USER: "${GF_ADMIN_USER}" + GF_SECURITY_ADMIN_PASSWORD: "${GF_ADMIN_PASSWORD}" + GF_AUTH_ANONYMOUS_ENABLED: "true" + GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" + volumes: + - ${PROJECT_ROOT}/monitoringconfig/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yaml + - grafana_data:/var/lib/grafana + restart: "always" + + monitoringapi: + image: 0chaindev/monitoringapi:latest + restart: "always" + + agent: + image: portainer/agent:2.18.2-alpine + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + + portainer: + image: portainer/portainer-ce:2.18.2-alpine + command: '-H tcp://agent:9001 --tlsskipverify --admin-password-file /tmp/portainer_password' + depends_on: + - agent + links: + - agent:agent + volumes: + - portainer_data:/data + - /tmp/portainer_password:/tmp/portainer_password + restart: "always" + +networks: + default: + driver: bridge + +volumes: + grafana_data: + prometheus_data: + portainer_data: + +EOF + +/usr/local/bin/docker-compose -f ${PROJECT_ROOT}/docker-compose.yml down +/usr/local/bin/docker-compose -f ${PROJECT_ROOT}/docker-compose.yml pull +/usr/local/bin/docker-compose -f ${PROJECT_ROOT}/docker-compose.yml up -d + +echo "Blobber KMS mode is enabled." 
diff --git a/chimney.sh b/chimney.sh index 5ddbf17..588de04 100644 --- a/chimney.sh +++ b/chimney.sh @@ -25,9 +25,6 @@ export KMS_PUBLIC_KEY=kmspublickey export KMS_PRIVATE_KEY=kmsprivatekey export KMS_CLIENT_KEY=kmsclientkey -# TODO: how to make firebase token persistent, because if user doesn't use his script for some time, then firebase token would expire -# TODO: probably do some expiration time for the script. - # export VALIDATOR_WALLET_ID=0chainvalwalletid # export VALIDATOR_WALLET_PUBLIC_KEY=0chainvalwalletpublickey # export VALIDATOR_WALLET_PRIV_KEY=0chainvalwalletprivkey @@ -62,6 +59,7 @@ install_tools_utilities() { echo -e "\e[32m $REQUIRED_PKG is already installed on the server/machine. \e[23m \n" fi } + check_port_443() { PORT=443 command -v netstat >/dev/null 2>&1 || { @@ -176,15 +174,6 @@ popd > /dev/null; # sed -i "s/validator:${DOCKER_IMAGE}/evalidator:${DOCKER_IMAGE_EBLOBBER}/g" ${PROJECT_ROOT}/docker-compose.yml # sed -i "s/blobber:${DOCKER_IMAGE}/eblobber:${DOCKER_IMAGE_EBLOBBER}/g" ${PROJECT_ROOT}/docker-compose.yml -if [ "$IS_KMS_ENABLED" = true ]; then - echo -e "\n\e[93m=============================================================================================================================================================================== - Saving blobber/validator Operational wallets to KMS. - =============================================================================================================================================================================== \e[39m" - - # TODO: how to create JWT token from script? 
CREATE SIGNATURE ON FE AND PASS IT TO THE SCRIPT - # TODO: or create JWT token on FE and pass it to the user -fi - #### ---- Start Blobber Setup ----- #### FOLDERS_TO_CREATE="config sql bin monitoringconfig keys_config" @@ -409,7 +398,7 @@ services: - ${PROJECT_ROOT}/keys_config:/blobber/keysconfig # keys and minio config - ${PROJECT_ROOT_HDD}/data/tmp:/tmp - ${PROJECT_ROOT}/sql:/blobber/sql - command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} ${KMS_COMMANDS} + command: ./bin/blobber --port 5051 --grpc_port 31501 --hostname ${BLOBBER_HOST} --deployment_mode 0 --keys_file keysconfig/b0bnode01_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --hosturl https://${BLOBBER_HOST} ${KMS_COMMANDS} networks: default: restart: "always"