From 89be91770c73eee165bd5f6ea49b63f6425ad548 Mon Sep 17 00:00:00 2001
From: ndrpp
Date: Tue, 13 Jan 2026 11:59:59 +0200
Subject: [PATCH 1/2] feat: update node quickstart script with gpu auto detect

---
 scripts/list_gpus.sh             | 109 +++++++++++++++++++++++++++++++
 scripts/ocean-node-quickstart.sh |  87 +++++++++++++++++++++++-
 2 files changed, 193 insertions(+), 3 deletions(-)
 create mode 100755 scripts/list_gpus.sh

diff --git a/scripts/list_gpus.sh b/scripts/list_gpus.sh
new file mode 100755
index 000000000..675eb878d
--- /dev/null
+++ b/scripts/list_gpus.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+# Function to check for NVIDIA GPUs
+get_nvidia_gpus() {
+  if command -v nvidia-smi &> /dev/null; then
+    # Query nvidia-smi for GPU count, names, and UUIDs
+    # We use csv format for easier parsing
+    nvidia-smi --query-gpu=name,uuid --format=csv,noheader | while IFS=, read -r name uuid; do
+      # Trim leading/trailing whitespace
+      name=$(echo "$name" | xargs)
+      uuid=$(echo "$uuid" | xargs)
+
+      # Create a JSON object for this GPU
+      # Note: We use the UUID as the ID locally, but it will be aggregated later
+      jq -c -n \
+        --arg name "$name" \
+        --arg uuid "$uuid" \
+        '{
+          description: $name,
+          init: {
+            deviceRequests: {
+              Driver: "nvidia",
+              DeviceIDs: [$uuid]
+            }
+          }
+        }'
+    done
+  fi
+}
+
+# Function to check for other GPUs (AMD, Intel, etc.) via lspci
+get_generic_gpus() {
+  # Check if lspci is available
+  if ! command -v lspci &> /dev/null; then
+    return
+  fi
+
+  # Iterate over VGA and 3D controllers
+  lspci -mm -n -d ::0300 | while read -r line; do process_pci_line "$line"; done
+  lspci -mm -n -d ::0302 | while read -r line; do process_pci_line "$line"; done
+}
+
+process_pci_line() {
+  line="$1"
+
+  slot=$(echo "$line" | awk '{print $1}')
+  vendor_id=$(echo "$line" | awk '{print $3}' | tr -d '"')
+
+  # We want to exclude NVIDIA here if we already handled them via nvidia-smi.
+  if [[ "$vendor_id" == "10de" ]] && command -v nvidia-smi &> /dev/null; then
+    return
+  fi
+
+  # Get human readable name
+  full_info=$(lspci -s "$slot" -vmm)
+  vendor_name=$(echo "$full_info" | grep "^Vendor:" | cut -f2-)
+  device_name=$(echo "$full_info" | grep "^Device:" | cut -f2-)
+
+  description="$vendor_name $device_name"
+  pci_id="0000:$slot"
+
+  # Determine driver
+  driver=""
+  if [[ "$vendor_id" == "1002" ]]; then # AMD
+    driver="amdgpu"
+  fi
+
+  # Construct JSON
+  jq -c -n \
+    --arg desc "$description" \
+    --arg driver "$driver" \
+    --arg pci_id "$pci_id" \
+    '{
+      description: $desc,
+      init: {
+        deviceRequests: {
+          Driver: (if $driver != "" then $driver else null end),
+          DeviceIDs: [$pci_id]
+        }
+      }
+    }'
+}
+
+# Function to get all GPUs in JSON array format
+get_all_gpus_json() {
+  (
+    get_nvidia_gpus
+    get_generic_gpus
+  ) | jq -s '
+    group_by(.description) | map({
+      id: (.[0].description | ascii_downcase | gsub("[^a-z0-9]"; "-") | gsub("-+"; "-") | sub("^-"; "") | sub("-$"; "")),
+      description: .[0].description,
+      type: "gpu",
+      total: length,
+      init: {
+        deviceRequests: {
+          Driver: .[0].init.deviceRequests.Driver,
+          DeviceIDs: (map(.init.deviceRequests.DeviceIDs[]) | unique),
+          Capabilities: [["gpu"]]
+        }
+      }
+    }) | map(if .init.deviceRequests.Driver == null then del(.init.deviceRequests.Driver) else . end)
+  '
+}
+
+# Main execution only if script is not sourced
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+  get_all_gpus_json
+fi
diff --git a/scripts/ocean-node-quickstart.sh b/scripts/ocean-node-quickstart.sh
index 4a1ed9c97..964017dd0 100755
--- a/scripts/ocean-node-quickstart.sh
+++ b/scripts/ocean-node-quickstart.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 #
-# Copyright (c) 2024 Ocean Protocol contributors
+# Copyright (c) 2026 Ocean Protocol contributors
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -100,6 +100,13 @@ read P2P_ipV6BindWsPort
 P2P_ipV6BindWsPort=${P2P_ipV6BindWsPort:-9003}
 validate_port "$P2P_ipV6BindWsPort"
 
+P2P_ENABLE_UPNP='false'
+read -p "Enable UPnP (useful in case you cannot set up port forwarding)? [ y/n ]: " enable_upnp
+if [ "$enable_upnp" == "y" ]; then
+  P2P_ENABLE_UPNP='true'
+fi
+
+
 read -p "Provide the public IPv4 address or FQDN where this node will be accessible: " P2P_ANNOUNCE_ADDRESS
 
 if [ -n "$P2P_ANNOUNCE_ADDRESS" ]; then
@@ -142,9 +149,83 @@ fi
 # Set default compute environments if not already defined
 if [ -z "$DOCKER_COMPUTE_ENVIRONMENTS" ]; then
   echo "Setting default DOCKER_COMPUTE_ENVIRONMENTS configuration"
-  export DOCKER_COMPUTE_ENVIRONMENTS="[{\"socketPath\":\"/var/run/docker.sock\",\"resources\":[{\"id\":\"disk\",\"total\":10}],\"storageExpiry\":604800,\"maxJobDuration\":36000,\"minJobDuration\":60,\"fees\":{\"1\":[{\"feeToken\":\"0x123\",\"prices\":[{\"id\":\"cpu\",\"price\":1}]}]},\"free\":{\"maxJobDuration\":360000,\"minJobDuration\":60,\"maxJobs\":3,\"resources\":[{\"id\":\"cpu\",\"max\":1},{\"id\":\"ram\",\"max\":1},{\"id\":\"disk\",\"max\":1}]}}]"
+  export DOCKER_COMPUTE_ENVIRONMENTS='[
+    {
+      "socketPath": "/var/run/docker.sock",
+      "resources": [
+        {
+          "id": "disk",
+          "total": 10
+        }
+      ],
+      "storageExpiry": 604800,
+      "maxJobDuration": 36000,
+      "minJobDuration": 60,
+      "fees": {
+        "1": [
+          {
+            "feeToken": "0x123",
+            "prices": [
+              {
+                "id": "cpu",
+                "price": 1
+              }
+            ]
+          }
+        ]
+      },
+      "free": {
+        "maxJobDuration": 360000,
+        "minJobDuration": 60,
+        "maxJobs": 3,
+        "resources": [
+          {
+            "id": "cpu",
+            "max": 1
+          },
+          {
+            "id": "ram",
+            "max": 1
+          },
+          {
+            "id": "disk",
+            "max": 1
+          }
+        ]
+      }
+    }
+  ]'
 fi
 
+# GPU Detection and Integration
+LIST_GPUS_SCRIPT="$(dirname "$0")/list_gpus.sh"
+if [ -f "$LIST_GPUS_SCRIPT" ] && command -v jq &> /dev/null; then
+  echo "Checking for GPUs..."
+  source "$LIST_GPUS_SCRIPT"
+  DETECTED_GPUS=$(get_all_gpus_json)
+
+  # Check if we got any GPUs (array not empty)
+  GPU_COUNT=$(echo "$DETECTED_GPUS" | jq 'length')
+
+  if [ "$GPU_COUNT" -gt 0 ]; then
+    echo "Detected $GPU_COUNT GPU type(s). Updating configuration..."
+
+    # Merge detected GPUs into the resources array of the first environment
+    # We use jq to append the detected GPU objects to existing resources
+    DOCKER_COMPUTE_ENVIRONMENTS=$(echo "$DOCKER_COMPUTE_ENVIRONMENTS" | jq --argjson gpus "$DETECTED_GPUS" '.[0].resources += $gpus')
+
+    # Also update free resources to include GPUs if desired, or at least the pricing?
+    # For now, let's just ensure they are in the available resources list.
+    echo "GPUs added to Compute Environment resources."
+  else
+    echo "No GPUs detected."
+  fi
+else
+  echo "Skipping GPU detection (script not found or jq missing)."
+fi
+
+echo "$DOCKER_COMPUTE_ENVIRONMENTS"
+
 cat <<EOF > docker-compose.yml
 services:
   ocean-node:
@@ -205,7 +286,7 @@ services:
 #      P2P_mDNSInterval: ''
 #      P2P_connectionsMaxParallelDials: ''
 #      P2P_connectionsDialTimeout: ''
-#      P2P_ENABLE_UPNP: ''
+      P2P_ENABLE_UPNP: '$P2P_ENABLE_UPNP'
 #      P2P_ENABLE_AUTONAT: ''
 #      P2P_ENABLE_CIRCUIT_RELAY_SERVER: ''
 #      P2P_ENABLE_CIRCUIT_RELAY_CLIENT: ''

From ea3cdf8358eb274cfa847635a9224a1ce195c554 Mon Sep 17 00:00:00 2001
From: ndrpp
Date: Tue, 13 Jan 2026 15:49:00 +0200
Subject: [PATCH 2/2] feat: update README for quickstart script

---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index 705261d69..e5137efe1 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,11 @@ npm run quickstart
 
 This command will run you through the process of setting up the environmental variables for your node.
 
+> [!NOTE]
+> The quickstart script attempts to automatically detect GPUs (NVIDIA via `nvidia-smi`, others via `lspci`) and appends them to your `DOCKER_COMPUTE_ENVIRONMENTS`.
+> If you configure `DOCKER_COMPUTE_ENVIRONMENTS` manually before running the script (e.g. via an environment variable), be aware that auto-detected GPUs will be **merged** into your configuration, which can lead to duplicate entries if you have already defined them yourself.
+> For most users, it is recommended to let the script handle GPU detection automatically.
+
 ## Option 3: Running Ocean Nodes with PM2
 
 PM2 is a process manager that makes it easy to manage and monitor your Node.js applications.
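
For reference, the new `scripts/list_gpus.sh` can be run on its own (it needs `jq`, plus `nvidia-smi` and/or `lspci`) to preview what the quickstart will merge into the first compute environment's `resources`. On a host with a single NVIDIA card the output would look roughly like the sketch below; the GPU model and UUID are illustrative, not taken from a real machine:

    # preview detected GPUs before running the quickstart (illustrative output)
    $ ./scripts/list_gpus.sh
    [
      {
        "id": "nvidia-geforce-rtx-3090",
        "description": "NVIDIA GeForce RTX 3090",
        "type": "gpu",
        "total": 1,
        "init": {
          "deviceRequests": {
            "Driver": "nvidia",
            "DeviceIDs": [
              "GPU-1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d"
            ],
            "Capabilities": [
              [
                "gpu"
              ]
            ]
          }
        }
      }
    ]

Each object in this array is appended to `.[0].resources` of `DOCKER_COMPUTE_ENVIRONMENTS` by the quickstart, which is why the README note above warns about duplicate entries when that variable is configured by hand.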