diff --git a/README.md b/README.md index e80aeb7..5ab9965 100644 --- a/README.md +++ b/README.md @@ -88,3 +88,4 @@ Once done, just go to and login as "admin" with . | AGENT | Toggles agent mode. If enabled, there will be no web interface available, and all logs will be sent and stored on HOST | `false` | - | HOST | Url to OnLogs host from protocol to domain name. | | if `AGENT=true` | ONLOGS_TOKEN | Token that will use an agent to authorize and connect to HOST | Generates with OnLogs interface | if `AGENT=true` +| MAX_LOGS_SIZE | Maximum allowed total size of stored logs before cleanup triggers. Accepts human-readable formats like 5GB, 500MB, 1.5GB etc. When exceeded, the oldest log entries are deleted in batches across all containers (starting from the oldest timestamps) until the total size is back under the limit | 10GB | - diff --git a/application/Dockerfile b/application/Dockerfile index 15c0a63..c5c57c2 100644 --- a/application/Dockerfile +++ b/application/Dockerfile @@ -21,10 +21,10 @@ ADD backend/. /backend/ WORKDIR /backend/ RUN go mod download \ - && go build -o main . + && go build -o onlogs . 
FROM alpine COPY --from=frontbuilder /code/dist/ /dist/ -COPY --from=backendbuilder /backend/main /backend/main -CMD ["/backend/main"] +COPY --from=backendbuilder /backend/onlogs /backend/onlogs +CMD ["/backend/onlogs"] diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index bbd88c5..0a71a00 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -4,21 +4,22 @@ import ( "fmt" "os" "strings" + "sync" "time" "github.com/devforth/OnLogs/app/util" "github.com/devforth/OnLogs/app/vars" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/iterator" + leveldbUtil "github.com/syndtr/goleveldb/leveldb/util" ) func GetLogStatusKey(message string) string { - if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") || // const statuses_errors = ["ERROR", "ERR", "Error", "Err"]; - strings.Contains(message, "Error") || strings.Contains(message, "Err") { + if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") { return "error" - } else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") { // const statuses_warnings = ["WARN", "WARNING"]; + } else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") { return "warn" - } else if strings.Contains(message, "DEBUG") { // const statuses_other = ["DEBUG", "INFO", "ONLOGS"]; + } else if strings.Contains(message, "DEBUG") { return "debug" } else if strings.Contains(message, "INFO") { return "info" @@ -28,6 +29,215 @@ func GetLogStatusKey(message string) string { return "other" } +func checkAndManageLogSize(host string, container string) error { + maxSize, err := util.ParseHumanReadableSize(os.Getenv("MAX_LOGS_SIZE")) + if err != nil { + return fmt.Errorf("failed to parse MAX_LOGS_SIZE: %v", err) + } + + for { + hosts, err := os.ReadDir("leveldb/hosts/") + if err != nil { + return fmt.Errorf("failed to 
read hosts directory: %v", err) + } + + var totalSize int64 + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + size := util.GetDirSize(hostName, containerName) + totalSize += int64(size * 1024 * 1024) + } + } + + fmt.Printf("Max size: %d, current dir size: %d\n", maxSize, totalSize) + if totalSize <= maxSize { + break + } + + var cutoffKeys [][]byte + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + logsDB := util.GetDB(hostName, containerName, "logs") + if logsDB == nil { + continue + } + + cutoffKeysForContainer, err := getCutoffKeysForContainer(logsDB, 200) + if err != nil || len(cutoffKeysForContainer) == 0 { + continue + } + cutoffKeys = append(cutoffKeys, cutoffKeysForContainer) + } + } + + if len(cutoffKeys) == 0 { + fmt.Println("Nothing to delete, cutoff keys not found.") + break + } + + oldestCutoffKey := findOldestCutoffKey(cutoffKeys) + oldestTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(oldestCutoffKey))) + if err != nil { + fmt.Println("Error parsing oldest time:", err) + break + } + fmt.Println("Oldest time for deletion cutoff:", oldestTime) + + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + logsDB := util.GetDB(hostName, containerName, "logs") + if logsDB == nil { + continue + } + + batch := new(leveldb.Batch) + deletedCount := 0 + iter := logsDB.NewIterator(nil, nil) + + count := 0 + for ok := iter.First(); ok && count < 200; ok = iter.Next() { + count++ + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key()))) + if err != nil { + fmt.Println("Error parsing key time:", err) + continue + } + 
if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) { + batch.Delete(iter.Key()) + deletedCount++ + } + } + iter.Release() + + if deletedCount > 0 { + err = logsDB.Write(batch, nil) + if err != nil { + fmt.Printf("Failed to delete batch in %s/%s: %v\n", hostName, containerName, err) + } else { + fmt.Printf("Deleted %d logs from %s/%s\n", deletedCount, hostName, containerName) + } + logsDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + } + + statusesDB := util.GetDB(hostName, containerName, "statuses") + if statusesDB != nil { + batch := new(leveldb.Batch) + deletedCountStatuses := 0 + iter := statusesDB.NewIterator(nil, nil) + + for ok := iter.First(); ok; ok = iter.Next() { + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key()))) + if err != nil { + fmt.Println("Error parsing key time:", err) + continue + } + if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) { + batch.Delete(iter.Key()) + deletedCountStatuses++ + } + } + iter.Release() + + if deletedCountStatuses > 0 { + err := statusesDB.Write(batch, nil) + if err != nil { + fmt.Printf("Failed to delete batch in statusesDB for %s/%s: %v\n", hostName, containerName, err) + } + statusesDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + } + } + } + } + + time.Sleep(100 * time.Millisecond) + } + + return nil +} + +func getCutoffKeysForContainer(db *leveldb.DB, limit int) ([]byte, error) { + iter := db.NewIterator(nil, nil) + defer iter.Release() + + var cutoffKeys [][]byte + for ok := iter.First(); ok && len(cutoffKeys) < limit; ok = iter.Next() { + key := append([]byte{}, iter.Key()...) 
+ cutoffKeys = append(cutoffKeys, key) + } + + if len(cutoffKeys) < limit { + return nil, fmt.Errorf("insufficient records to form cutoff keys") + } + + return cutoffKeys[len(cutoffKeys)-1], nil +} + +func findOldestCutoffKey(cutoffKeys [][]byte) []byte { + var oldestKey []byte + var oldestTime time.Time + first := true + + for _, key := range cutoffKeys { + keyStr := string(key) + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(keyStr)) + if err != nil { + fmt.Println("Error parsing key time:", err) + continue + } + + if first || keyTime.Before(oldestTime) { + oldestKey = key + oldestTime = keyTime + first = false + } + } + return oldestKey +} + +var ( + logCleanupMu sync.Mutex + nextCleanup time.Time + isCleanupRunning bool +) + +func MaybeScheduleCleanup(host string, container string) { + logCleanupMu.Lock() + + defer logCleanupMu.Unlock() + + if isCleanupRunning { + return + } + if time.Now().Before(nextCleanup) { + return + } + + isCleanupRunning = true + + go func() { + err := checkAndManageLogSize(host, container) + + logCleanupMu.Lock() + defer logCleanupMu.Unlock() + + isCleanupRunning = false + nextCleanup = time.Now().Add(1 * time.Minute) + + if err != nil { + fmt.Printf("Log cleanup failed: %v\n", err) + } + }() +} + func PutLogMessage(db *leveldb.DB, host string, container string, message_item []string) error { if len(message_item[0]) < 30 { fmt.Println("WARNING: got broken timestamp: ", "timestamp: "+message_item[0], "message: "+message_item[1]) @@ -37,6 +247,9 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [ if host == "" { panic("Host is not mentioned!") } + + MaybeScheduleCleanup(host, container) + location := host + "/" + container if vars.Statuses_DBs[location] == nil { vars.Statuses_DBs[location] = util.GetDB(host, container, "statuses") diff --git a/application/backend/app/util/util.go b/application/backend/app/util/util.go index 46a7a7e..37689f4 100644 --- 
a/application/backend/app/util/util.go +++ b/application/backend/app/util/util.go @@ -10,6 +10,7 @@ import ( "net/http" "os" "path/filepath" + "strconv" "strings" "time" @@ -298,3 +299,33 @@ func GetStorageData() map[string]float64 { // time.Sleep(time.Second * 30) // } // } + +var units = []struct { + Suffix string + Multiplier int64 +}{ + {"TB", 1024 * 1024 * 1024 * 1024}, + {"T", 1024 * 1024 * 1024 * 1024}, + {"GB", 1024 * 1024 * 1024}, + {"G", 1024 * 1024 * 1024}, + {"MB", 1024 * 1024}, + {"M", 1024 * 1024}, + {"KB", 1024}, + {"K", 1024}, + {"B", 1}, +} + +func ParseHumanReadableSize(sizeStr string) (int64, error) { + sizeStr = strings.TrimSpace(strings.ToUpper(sizeStr)) + for _, unit := range units { + if strings.HasSuffix(sizeStr, unit.Suffix) { + numStr := strings.TrimSuffix(sizeStr, unit.Suffix) + num, err := strconv.ParseFloat(numStr, 64) + if err != nil { + return 0, fmt.Errorf("invalid number in size: %s", numStr) + } + return int64(num * float64(unit.Multiplier)), nil + } + } + return 0, fmt.Errorf("unknown size unit in: %s", sizeStr) +} diff --git a/application/backend/main.go b/application/backend/main.go index 4eb84d7..ae62c07 100644 --- a/application/backend/main.go +++ b/application/backend/main.go @@ -29,6 +29,11 @@ func init_config() { if os.Getenv("DOCKER_SOCKET_PATH") == "" { os.Setenv("DOCKER_SOCKET_PATH", "/var/run/docker.sock") } + + if os.Getenv("MAX_LOGS_SIZE") == "" { + os.Setenv("MAX_LOGS_SIZE", "10GB") + } + fmt.Println("INFO: OnLogs configs done!") } diff --git a/application/build.sh b/application/build.sh index 1e7bc23..b9cd3f6 100755 --- a/application/build.sh +++ b/application/build.sh @@ -1,4 +1,2 @@ # docker buildx create --use -docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.2" --push . -# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .) -# docker build . 
-t devforth/onlogs && docker push devforth/onlogs +docker buildx build --load --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" . diff --git a/application/frontend/src/lib/CheckBox/CheckBox.scss b/application/frontend/src/lib/CheckBox/CheckBox.scss index 96b6e83..cbb19c5 100644 --- a/application/frontend/src/lib/CheckBox/CheckBox.scss +++ b/application/frontend/src/lib/CheckBox/CheckBox.scss @@ -7,7 +7,7 @@ align-items: center; padding: 5px; box-sizing: border-box; - justify-content: end; + justify-content: start; cursor: pointer; position: relative; } @@ -16,15 +16,13 @@ .checkboxRoll { background-color: $active-color; position: absolute; - // left: 0; - transform: translateX(-85%); + transform: translateX(85%); transition: all 100ms; } } .inactive { .checkboxRoll { - right: 0; transform: translateX(0); transition: all 200ms; } diff --git a/application/frontend/src/lib/DropDown/DropDown.scss b/application/frontend/src/lib/DropDown/DropDown.scss index 2d08fa4..558fb05 100644 --- a/application/frontend/src/lib/DropDown/DropDown.scss +++ b/application/frontend/src/lib/DropDown/DropDown.scss @@ -25,7 +25,7 @@ right: 100%; transform: translate(25%); .dropDownRawEl.text { - margin-right: 0px; + margin-right: 5px; } } diff --git a/application/release.sh b/application/release.sh new file mode 100755 index 0000000..a5a9ca8 --- /dev/null +++ b/application/release.sh @@ -0,0 +1,4 @@ +# docker buildx create --use +docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.6" --push . +# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .) +# docker build . -t devforth/onlogs && docker push devforth/onlogs