From 65339a483450fb7cf4cd0fe81f09e5e4613f541b Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Tue, 22 Apr 2025 14:53:26 +0300 Subject: [PATCH 01/10] Add log size management feature - Introduced MAX_LOGS_SIZE environment variable to set the maximum allowed log size. - Implemented log size checking and cleanup logic in the container database. - Added utility function to parse human-readable size formats. - Updated README to document the new MAX_LOGS_SIZE configuration option. --- README.md | 1 + .../backend/app/containerdb/containerdb.go | 152 +++++++++++++++++- application/backend/app/util/util.go | 27 ++++ application/backend/main.go | 5 + 4 files changed, 182 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e80aeb7..00832d3 100644 --- a/README.md +++ b/README.md @@ -88,3 +88,4 @@ Once done, just go to and login as "admin" with . | AGENT | Toggles agent mode. If enabled, there will be no web interface available, and all logs will be sent and stored on HOST | `false` | - | HOST | Url to OnLogs host from protocol to domain name. | | if `AGENT=true` | ONLOGS_TOKEN | Token that will use an agent to authorize and connect to HOST | Generates with OnLogs interface | if `AGENT=true` +| MAX_LOGS_SIZE | Maximum allowed total logs size before cleanup triggers. Accepts human-readable formats like 5GB, 500MB, 1.5GB etc. When exceeded, 30% of logs (by count) will be removed proportionally across containers starting from oldest | 5GB | - diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index bbd88c5..fe94955 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -1,15 +1,19 @@ package containerdb import ( + "bytes" "fmt" "os" + "sort" "strings" + "sync" "time" "github.com/devforth/OnLogs/app/util" "github.com/devforth/OnLogs/app/vars" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/iterator" + leveldbUtil "github.com/syndtr/goleveldb/leveldb/util" ) func GetLogStatusKey(message string) string { @@ -28,6 +32,146 @@ func GetLogStatusKey(message string) string { return "other" } +func checkAndManageLogSize(host string, container string) error { + maxSize, err := util.ParseHumanReadableSize(os.Getenv("MAX_LOGS_SIZE")) + if err != nil { + return fmt.Errorf("failed to parse MAX_LOGS_SIZE: %v", err) + } + + hosts, err := os.ReadDir("leveldb/hosts/") + if err != nil { + return fmt.Errorf("failed to read hosts directory: %v", err) + } + + type logEntryMeta struct { + host string + container string + key []byte + size int64 + } + + var allLogs []logEntryMeta + var totalLogBytes int64 + var totalSize int64 + var sizeBuffer int64 + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + logsDB := util.GetDB(hostName, containerName, "logs") + if logsDB == nil { + continue + } + size := util.GetDirSize(h.Name(), c.Name()) + containerSizeBytes := int64(size * 1024 * 1024) + iter := logsDB.NewIterator(nil, nil) + for iter.Next() { + key := iter.Key() + val := iter.Value() + size := int64(len(key) + len(val)) + totalLogBytes += size + allLogs = append(allLogs, logEntryMeta{ + host: hostName, + container: containerName, + key: append([]byte{}, key...), + size: size, + }) + + } + sizeBuffer += containerSizeBytes + iter.Release() + } + } + totalSize = sizeBuffer + fmt.Printf("Max size: %d, dir size: %d\n", 
maxSize, int64(totalSize)) + + if maxSize > int64(totalSize) { + return nil + } + + fmt.Printf("Total logical log size: %d bytes\n", totalLogBytes) + + if len(allLogs) == 0 { + fmt.Println("No logs found.") + return nil + } + + bytesToDelete := int64(float64(totalLogBytes) * 0.20) + deletedBytes := int64(0) + + sort.Slice(allLogs, func(i, j int) bool { + return bytes.Compare(allLogs[i].key, allLogs[j].key) < 0 + }) + + batches := make(map[string]*leveldb.Batch) + statusesDBs := make(map[string]*leveldb.DB) + + for _, entry := range allLogs { + if deletedBytes >= bytesToDelete { + break + } + location := entry.host + "/" + entry.container + if batches[location] == nil { + batches[location] = new(leveldb.Batch) + } + batches[location].Delete(entry.key) + deletedBytes += entry.size + + if statusesDBs[location] == nil { + statusesDBs[location] = util.GetDB(entry.host, entry.container, "statuses") + } + if statusesDBs[location] != nil { + statusesDBs[location].Delete(entry.key, nil) + } + } + + for location, batch := range batches { + parts := strings.Split(location, "/") + host, container := parts[0], parts[1] + db := util.GetDB(host, container, "logs") + if db == nil { + continue + } + + err := db.Write(batch, nil) + if err != nil { + fmt.Printf("Failed to delete batch in %s/%s: %v\n", host, container, err) + } else { + fmt.Printf("Deleted %d logs from %s/%s\n", batch.Len(), host, container) + } + db.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + if statusesDBs[location] != nil { + statusesDBs[location].CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + } + } + + fmt.Printf("Deleted total: %d bytes (target: %d = 30%%)\n", deletedBytes, bytesToDelete) + return nil +} + +var ( + logCleanupMu sync.Mutex + nextCleanup time.Time +) + +func MaybeScheduleCleanup(host string, container string) { + logCleanupMu.Lock() + defer logCleanupMu.Unlock() + + if time.Now().Before(nextCleanup) { + return + } + nextCleanup = time.Now().Add(1 * time.Minute) + go func() { + time.Sleep(1 * time.Minute) + err := checkAndManageLogSize(host, container) + if err != nil { + fmt.Printf("Log cleanup failed: %v\n", err) + } + }() +} + func PutLogMessage(db *leveldb.DB, host string, container string, message_item []string) error { if len(message_item[0]) < 30 { fmt.Println("WARNING: got broken timestamp: ", "timestamp: "+message_item[0], "message: "+message_item[1]) @@ -35,8 +179,10 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [ } if host == "" { - panic("Host is not mentioned!") + return fmt.Errorf("host is not mentioned") } + MaybeScheduleCleanup(host, container) + location := host + "/" + container if vars.Statuses_DBs[location] == nil { vars.Statuses_DBs[location] = util.GetDB(host, container, "statuses") @@ -56,9 +202,9 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [ tries++ } if err != nil { - panic(err) + return fmt.Errorf("failed to write log after %d tries: %v", tries, err) } - return err + return nil } func fitsForSearch(logLine string, message string, caseSensetivity bool) bool { diff --git a/application/backend/app/util/util.go b/application/backend/app/util/util.go index 46a7a7e..c324977 100644 --- a/application/backend/app/util/util.go +++ b/application/backend/app/util/util.go @@ -10,6 +10,7 @@ import ( "net/http" "os" "path/filepath" + "strconv" "strings" "time" @@ -298,3 +299,29 @@ func GetStorageData() map[string]float64 { // time.Sleep(time.Second * 30) // } // } + +var units = []struct { + Suffix string + 
Multiplier int64 +}{ + {"TB", 1024 * 1024 * 1024 * 1024}, + {"GB", 1024 * 1024 * 1024}, + {"MB", 1024 * 1024}, + {"KB", 1024}, + {"B", 1}, +} + +func ParseHumanReadableSize(sizeStr string) (int64, error) { + sizeStr = strings.TrimSpace(strings.ToUpper(sizeStr)) + for _, unit := range units { + if strings.HasSuffix(sizeStr, unit.Suffix) { + numStr := strings.TrimSuffix(sizeStr, unit.Suffix) + num, err := strconv.ParseFloat(numStr, 64) + if err != nil { + return 0, fmt.Errorf("invalid number in size: %s", numStr) + } + return int64(num * float64(unit.Multiplier)), nil + } + } + return 0, fmt.Errorf("unknown size unit in: %s", sizeStr) +} diff --git a/application/backend/main.go b/application/backend/main.go index 4eb84d7..62c9753 100644 --- a/application/backend/main.go +++ b/application/backend/main.go @@ -29,6 +29,11 @@ func init_config() { if os.Getenv("DOCKER_SOCKET_PATH") == "" { os.Setenv("DOCKER_SOCKET_PATH", "/var/run/docker.sock") } + + if os.Getenv("MAX_LOGS_SIZE") == "" { + os.Setenv("MAX_LOGS_SIZE", "5GB") + } + fmt.Println("INFO: OnLogs configs done!") } From 46eef3ae92f962bef350c7069da378d92952e86e Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Tue, 22 Apr 2025 14:54:51 +0300 Subject: [PATCH 02/10] Update CheckBox and DropDown styles - Changed CheckBox alignment from end to start for better layout consistency. - Adjusted CheckBox roll transformation for improved visual feedback. - Modified DropDown text margin for better spacing. --- application/frontend/src/lib/CheckBox/CheckBox.scss | 6 ++---- application/frontend/src/lib/DropDown/DropDown.scss | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/application/frontend/src/lib/CheckBox/CheckBox.scss b/application/frontend/src/lib/CheckBox/CheckBox.scss index 96b6e83..cbb19c5 100644 --- a/application/frontend/src/lib/CheckBox/CheckBox.scss +++ b/application/frontend/src/lib/CheckBox/CheckBox.scss @@ -7,7 +7,7 @@ align-items: center; padding: 5px; box-sizing: border-box; - justify-content: end; + justify-content: start; cursor: pointer; position: relative; } @@ -16,15 +16,13 @@ .checkboxRoll { background-color: $active-color; position: absolute; - // left: 0; - transform: translateX(-85%); + transform: translateX(85%); transition: all 100ms; } } .inactive { .checkboxRoll { - right: 0; transform: translateX(0); transition: all 200ms; } diff --git a/application/frontend/src/lib/DropDown/DropDown.scss b/application/frontend/src/lib/DropDown/DropDown.scss index 2d08fa4..558fb05 100644 --- a/application/frontend/src/lib/DropDown/DropDown.scss +++ b/application/frontend/src/lib/DropDown/DropDown.scss @@ -25,7 +25,7 @@ right: 100%; transform: translate(25%); .dropDownRawEl.text { - margin-right: 0px; + margin-right: 5px; } } From 30c3dc6f1e88d4bacab69898024b4a3d20be84d3 Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Tue, 22 Apr 2025 16:54:23 +0300 Subject: [PATCH 03/10] Adjust log deletion target in log size management --- application/backend/app/containerdb/containerdb.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index fe94955..5691e6b 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -97,7 +97,7 @@ func checkAndManageLogSize(host string, container string) error { return nil } - bytesToDelete := int64(float64(totalLogBytes) * 0.20) + bytesToDelete := int64(float64(totalLogBytes) * 0.10) deletedBytes := 
int64(0) sort.Slice(allLogs, func(i, j int) bool { @@ -146,7 +146,7 @@ func checkAndManageLogSize(host string, container string) error { } } - fmt.Printf("Deleted total: %d bytes (target: %d = 30%%)\n", deletedBytes, bytesToDelete) + fmt.Printf("Deleted total: %d bytes (target: %d = 10%%)\n", deletedBytes, bytesToDelete) return nil } From eb9a57931b645fff53ea56b8252b04a5bf43f9af Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Thu, 24 Apr 2025 11:06:02 +0300 Subject: [PATCH 04/10] Update MAX_LOGS_SIZE to 10GB --- README.md | 2 +- application/backend/main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 00832d3..5ab9965 100644 --- a/README.md +++ b/README.md @@ -88,4 +88,4 @@ Once done, just go to and login as "admin" with . | AGENT | Toggles agent mode. If enabled, there will be no web interface available, and all logs will be sent and stored on HOST | `false` | - | HOST | Url to OnLogs host from protocol to domain name. | | if `AGENT=true` | ONLOGS_TOKEN | Token that will use an agent to authorize and connect to HOST | Generates with OnLogs interface | if `AGENT=true` -| MAX_LOGS_SIZE | Maximum allowed total logs size before cleanup triggers. Accepts human-readable formats like 5GB, 500MB, 1.5GB etc. When exceeded, 30% of logs (by count) will be removed proportionally across containers starting from oldest | 5GB | - +| MAX_LOGS_SIZE | Maximum allowed total logs size before cleanup triggers. Accepts human-readable formats like 5GB, 500MB, 1.5GB etc. When exceeded, 10% of logs (by count) will be removed proportionally across containers starting from oldest | 10GB | - diff --git a/application/backend/main.go b/application/backend/main.go index 62c9753..ae62c07 100644 --- a/application/backend/main.go +++ b/application/backend/main.go @@ -31,7 +31,7 @@ func init_config() { } if os.Getenv("MAX_LOGS_SIZE") == "" { - os.Setenv("MAX_LOGS_SIZE", "5GB") + os.Setenv("MAX_LOGS_SIZE", "10GB") } fmt.Println("INFO: OnLogs configs done!") From 3fde4df1c061aae3c41912262a4062e6ea8ecd42 Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Thu, 24 Apr 2025 11:19:19 +0300 Subject: [PATCH 05/10] Add shorthand notations for data size units in utility function --- application/backend/app/util/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/application/backend/app/util/util.go b/application/backend/app/util/util.go index c324977..37689f4 100644 --- a/application/backend/app/util/util.go +++ b/application/backend/app/util/util.go @@ -305,9 +305,13 @@ var units = []struct { Multiplier int64 }{ {"TB", 1024 * 1024 * 1024 * 1024}, + {"T", 1024 * 1024 * 1024 * 1024}, {"GB", 1024 * 1024 * 1024}, + {"G", 1024 * 1024 * 1024}, {"MB", 1024 * 1024}, + {"M", 1024 * 1024}, {"KB", 1024}, + {"K", 1024}, {"B", 1}, } From f1c7a854716341633ac74dab3632c8e4d66ebac0 Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Thu, 24 Apr 2025 11:41:03 +0300 Subject: [PATCH 06/10] Refactor error handling in PutLogMessage function --- application/backend/app/containerdb/containerdb.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index 5691e6b..5a60bc1 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -179,8 +179,9 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [ } if host == "" { - return fmt.Errorf("host is not mentioned") + panic("Host is not 
mentioned!") } + MaybeScheduleCleanup(host, container) location := host + "/" + container @@ -202,9 +203,9 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [ tries++ } if err != nil { - return fmt.Errorf("failed to write log after %d tries: %v", tries, err) + panic(err) } - return nil + return err } func fitsForSearch(logLine string, message string, caseSensetivity bool) bool { From 4f8538732d1ff21c9ee5cd6a99e4e9ef9a32e0fa Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Thu, 24 Apr 2025 11:50:12 +0300 Subject: [PATCH 07/10] Update Docker build tags and add release script for version 1.1.4 --- application/build.sh | 4 +--- application/release.sh | 4 ++++ 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100755 application/release.sh diff --git a/application/build.sh b/application/build.sh index 1e7bc23..43643dd 100755 --- a/application/build.sh +++ b/application/build.sh @@ -1,4 +1,2 @@ # docker buildx create --use -docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.2" --push . -# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .) -# docker build . -t devforth/onlogs && docker push devforth/onlogs +docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" . diff --git a/application/release.sh b/application/release.sh new file mode 100755 index 0000000..9b9e618 --- /dev/null +++ b/application/release.sh @@ -0,0 +1,4 @@ +# docker buildx create --use +docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" --push . +# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .) +# docker build . -t devforth/onlogs && docker push devforth/onlogs From a50c740da5a9681613bd314e75fcd63ee62ed29d Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Fri, 25 Apr 2025 14:05:04 +0300 Subject: [PATCH 08/10] Refactor log management and update Docker build script - Updated Docker build command to use --load for improved image handling. - Changed output binary name from 'main' to 'onlogs' in the Dockerfile. - Simplified log status key determination by removing redundant checks. - Enhanced log size management logic to ensure efficient cleanup of old logs. --- application/Dockerfile | 6 +- .../backend/app/containerdb/containerdb.go | 267 +++++++++++------- application/build.sh | 2 +- 3 files changed, 171 insertions(+), 104 deletions(-) diff --git a/application/Dockerfile b/application/Dockerfile index 15c0a63..c5c57c2 100644 --- a/application/Dockerfile +++ b/application/Dockerfile @@ -21,10 +21,10 @@ ADD backend/. /backend/ WORKDIR /backend/ RUN go mod download \ - && go build -o main . + && go build -o onlogs . 
FROM alpine COPY --from=frontbuilder /code/dist/ /dist/ -COPY --from=backendbuilder /backend/main /backend/main -CMD ["/backend/main"] +COPY --from=backendbuilder /backend/onlogs /backend/onlogs +CMD ["/backend/onlogs"] diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index 5a60bc1..390dfde 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -1,10 +1,8 @@ package containerdb import ( - "bytes" "fmt" "os" - "sort" "strings" "sync" "time" @@ -17,12 +15,11 @@ import ( ) func GetLogStatusKey(message string) string { - if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") || // const statuses_errors = ["ERROR", "ERR", "Error", "Err"]; - strings.Contains(message, "Error") || strings.Contains(message, "Err") { + if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") { return "error" - } else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") { // const statuses_warnings = ["WARN", "WARNING"]; + } else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") { return "warn" - } else if strings.Contains(message, "DEBUG") { // const statuses_other = ["DEBUG", "INFO", "ONLOGS"]; + } else if strings.Contains(message, "DEBUG") { return "debug" } else if strings.Contains(message, "INFO") { return "info" @@ -38,134 +35,204 @@ func checkAndManageLogSize(host string, container string) error { return fmt.Errorf("failed to parse MAX_LOGS_SIZE: %v", err) } - hosts, err := os.ReadDir("leveldb/hosts/") - if err != nil { - return fmt.Errorf("failed to read hosts directory: %v", err) - } - - type logEntryMeta struct { - host string - container string - key []byte - size int64 - } + for { + hosts, err := os.ReadDir("leveldb/hosts/") + if err != nil { + return fmt.Errorf("failed to read hosts directory: %v", err) + } - var allLogs []logEntryMeta - var totalLogBytes int64 - var totalSize int64 - var sizeBuffer int64 - for _, h := range hosts { - hostName := h.Name() - containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") - for _, c := range containers { - containerName := c.Name() - logsDB := util.GetDB(hostName, containerName, "logs") - if logsDB == nil { - continue + var totalSize int64 + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + size := util.GetDirSize(hostName, containerName) + totalSize += int64(size * 1024 * 1024) } - size := util.GetDirSize(h.Name(), c.Name()) - containerSizeBytes := int64(size * 1024 * 1024) - iter := logsDB.NewIterator(nil, nil) - for iter.Next() { - key := iter.Key() - val := iter.Value() - size := int64(len(key) + len(val)) - totalLogBytes += size - allLogs = append(allLogs, logEntryMeta{ - host: hostName, - container: containerName, - key: append([]byte{}, key...), - size: size, - }) + } + + fmt.Printf("Max size: %d, current dir size: %d\n", maxSize, totalSize) + if totalSize <= maxSize { + break + } + var cutoffKeys [][]byte + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + logsDB := util.GetDB(hostName, containerName, "logs") + if logsDB == nil { + continue + } + + cutoffKeysForContainer, err := getCutoffKeysForContainer(logsDB, 200) + if err != nil || 
len(cutoffKeysForContainer) == 0 { + continue + } + cutoffKeys = append(cutoffKeys, cutoffKeysForContainer) } - sizeBuffer += containerSizeBytes - iter.Release() } - } - totalSize = sizeBuffer - fmt.Printf("Max size: %d, dir size: %d\n", maxSize, int64(totalSize)) - if maxSize > int64(totalSize) { - return nil - } + if len(cutoffKeys) == 0 { + fmt.Println("Nothing to delete, cutoff keys not found.") + break + } - fmt.Printf("Total logical log size: %d bytes\n", totalLogBytes) + oldestCutoffKey := findOldestCutoffKey(cutoffKeys) + oldestTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(oldestCutoffKey))) + if err != nil { + fmt.Println("Error parsing oldest time:", err) + break + } + fmt.Println("Oldest time for deletion cutoff:", oldestTime) + + for _, h := range hosts { + hostName := h.Name() + containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers") + for _, c := range containers { + containerName := c.Name() + logsDB := util.GetDB(hostName, containerName, "logs") + if logsDB == nil { + continue + } + + batch := new(leveldb.Batch) + deletedCount := 0 + iter := logsDB.NewIterator(nil, nil) + + count := 0 + for ok := iter.First(); ok && count < 200; ok = iter.Next() { + count++ + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key()))) + if err != nil { + fmt.Println("Error parsing key time:", err) + continue + } + if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) { + batch.Delete(iter.Key()) + deletedCount++ + } + } + iter.Release() + + if deletedCount > 0 { + err = logsDB.Write(batch, nil) + if err != nil { + fmt.Printf("Failed to delete batch in %s/%s: %v\n", hostName, containerName, err) + } else { + fmt.Printf("Deleted %d logs from %s/%s\n", deletedCount, hostName, containerName) + } + logsDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + } + + statusesDB := util.GetDB(hostName, containerName, "statuses") + if statusesDB != nil { + batch := new(leveldb.Batch) + deletedCountStatuses := 0 + iter := statusesDB.NewIterator(nil, nil) + + for ok := iter.First(); ok; ok = iter.Next() { + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key()))) + if err != nil { + fmt.Println("Error parsing key time:", err) + continue + } + if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) { + batch.Delete(iter.Key()) + deletedCountStatuses++ + } + } + iter.Release() + + if deletedCountStatuses > 0 { + err := statusesDB.Write(batch, nil) + if err != nil { + fmt.Printf("Failed to delete batch in statusesDB for %s/%s: %v\n", hostName, containerName, err) + } + statusesDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + } + } + } + } - if len(allLogs) == 0 { - fmt.Println("No logs found.") - return nil + time.Sleep(100 * time.Millisecond) } - bytesToDelete := int64(float64(totalLogBytes) * 0.10) - deletedBytes := int64(0) - - sort.Slice(allLogs, func(i, j int) bool { - return bytes.Compare(allLogs[i].key, allLogs[j].key) < 0 - }) + return nil +} - batches := make(map[string]*leveldb.Batch) - statusesDBs := make(map[string]*leveldb.DB) +func getCutoffKeysForContainer(db *leveldb.DB, limit int) ([]byte, error) { + iter := db.NewIterator(nil, nil) + defer iter.Release() - for _, entry := range allLogs { - if deletedBytes >= bytesToDelete { - break - } - location := entry.host + "/" + entry.container - if batches[location] == nil { - batches[location] = new(leveldb.Batch) - } - batches[location].Delete(entry.key) - deletedBytes += entry.size + var cutoffKeys [][]byte + for ok := 
iter.First(); ok && len(cutoffKeys) < limit; ok = iter.Next() { + key := append([]byte{}, iter.Key()...) + cutoffKeys = append(cutoffKeys, key) + } - if statusesDBs[location] == nil { - statusesDBs[location] = util.GetDB(entry.host, entry.container, "statuses") - } - if statusesDBs[location] != nil { - statusesDBs[location].Delete(entry.key, nil) - } + if len(cutoffKeys) < limit { + return nil, fmt.Errorf("insufficient records to form cutoff keys") } - for location, batch := range batches { - parts := strings.Split(location, "/") - host, container := parts[0], parts[1] - db := util.GetDB(host, container, "logs") - if db == nil { - continue - } + return cutoffKeys[len(cutoffKeys)-1], nil +} - err := db.Write(batch, nil) +func findOldestCutoffKey(cutoffKeys [][]byte) []byte { + var oldestKey []byte + var oldestTime time.Time + first := true + + for _, key := range cutoffKeys { + keyStr := string(key) + keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(keyStr)) if err != nil { - fmt.Printf("Failed to delete batch in %s/%s: %v\n", host, container, err) - } else { - fmt.Printf("Deleted %d logs from %s/%s\n", batch.Len(), host, container) + fmt.Println("Error parsing key time:", err) + continue } - db.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) - if statusesDBs[location] != nil { - statusesDBs[location].CompactRange(leveldbUtil.Range{Start: nil, Limit: nil}) + + if first || keyTime.Before(oldestTime) { + oldestKey = key + oldestTime = keyTime + first = false + fmt.Println("New oldest key:", keyTime) } } - - fmt.Printf("Deleted total: %d bytes (target: %d = 10%%)\n", deletedBytes, bytesToDelete) - return nil + return oldestKey } var ( - logCleanupMu sync.Mutex - nextCleanup time.Time + logCleanupMu sync.Mutex + nextCleanup time.Time + isCleanupRunning bool ) func MaybeScheduleCleanup(host string, container string) { logCleanupMu.Lock() + defer logCleanupMu.Unlock() + if isCleanupRunning { + return + } if time.Now().Before(nextCleanup) { return } - nextCleanup = time.Now().Add(1 * time.Minute) + + isCleanupRunning = true + go func() { - time.Sleep(1 * time.Minute) err := checkAndManageLogSize(host, container) + + logCleanupMu.Lock() + defer logCleanupMu.Unlock() + + isCleanupRunning = false + nextCleanup = time.Now().Add(1 * time.Minute) + if err != nil { fmt.Printf("Log cleanup failed: %v\n", err) } diff --git a/application/build.sh b/application/build.sh index 43643dd..b9cd3f6 100755 --- a/application/build.sh +++ b/application/build.sh @@ -1,2 +1,2 @@ # docker buildx create --use -docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" . +docker buildx build --load --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" . 
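The deletion strategy in PATCH 08 can be pictured with a small standalone sketch. This is only an illustration under assumptions: log keys are taken to begin with an RFC3339Nano timestamp (which is what getDateTimeFromKey appears to extract), the "timestamp, space, payload" key layout and the helper name pickCutoff are hypothetical, and the per-container limit is shrunk from 200 to 2 so the output stays readable; the real code walks LevelDB iterators rather than in-memory slices.

package main

import (
	"fmt"
	"sort"
	"strings"
	"time"
)

// pickCutoff returns the timestamp of the newest among the `limit` oldest
// entries of one container, or false if the container has fewer entries.
func pickCutoff(keys []string, limit int) (time.Time, bool) {
	if len(keys) < limit {
		return time.Time{}, false
	}
	sort.Strings(keys) // RFC3339Nano prefixes sort chronologically
	ts := strings.SplitN(keys[limit-1], " ", 2)[0]
	t, err := time.Parse(time.RFC3339Nano, ts)
	if err != nil {
		return time.Time{}, false
	}
	return t, true
}

func main() {
	containers := map[string][]string{
		"web": {
			"2025-04-01T10:00:00.000000001Z web-1",
			"2025-04-02T10:00:00.000000001Z web-2",
			"2025-04-03T10:00:00.000000001Z web-3",
		},
		"db": {
			"2025-03-30T09:00:00.000000001Z db-1",
			"2025-04-05T09:00:00.000000001Z db-2",
			"2025-04-06T09:00:00.000000001Z db-3",
		},
	}

	// One cleanup pass: collect each container's cutoff, keep the oldest
	// of them, and delete only entries at or before that instant.
	var oldest time.Time
	found := false
	for _, keys := range containers {
		if c, ok := pickCutoff(keys, 2); ok && (!found || c.Before(oldest)) {
			oldest, found = c, true
		}
	}
	fmt.Println("delete everything at or before:", oldest)
}

Read this way, the patch trims at most the 200 oldest entries per container per pass, re-checks the on-disk size, and repeats; containers holding fewer than 200 entries contribute no cutoff candidate, so the cutoff is always taken from containers that still have a sizeable backlog.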
From 402231e2e56fe1be4be03d038b74b535d6f52976 Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Fri, 25 Apr 2025 14:08:29 +0300 Subject: [PATCH 09/10] Remove debug print statement from findOldestCutoffKey function in containerdb.go --- application/backend/app/containerdb/containerdb.go | 1 - 1 file changed, 1 deletion(-) diff --git a/application/backend/app/containerdb/containerdb.go b/application/backend/app/containerdb/containerdb.go index 390dfde..0a71a00 100644 --- a/application/backend/app/containerdb/containerdb.go +++ b/application/backend/app/containerdb/containerdb.go @@ -198,7 +198,6 @@ func findOldestCutoffKey(cutoffKeys [][]byte) []byte { oldestKey = key oldestTime = keyTime first = false - fmt.Println("New oldest key:", keyTime) } } return oldestKey From 584aaf2e88f1f554f09c2e1cb53ea34084ec1014 Mon Sep 17 00:00:00 2001 From: Maksym Pipkun Date: Tue, 29 Apr 2025 10:08:10 +0300 Subject: [PATCH 10/10] Update Docker build tags in release script to version 1.1.6 --- application/release.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/application/release.sh b/application/release.sh index 9b9e618..a5a9ca8 100755 --- a/application/release.sh +++ b/application/release.sh @@ -1,4 +1,4 @@ # docker buildx create --use -docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" --push . +docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.6" --push . # docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .) # docker build . -t devforth/onlogs && docker push devforth/onlogs
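As a usage note for the size parser introduced in this series, here is a minimal sketch of how util.ParseHumanReadableSize and the MAX_LOGS_SIZE default could be exercised. It assumes it is compiled inside this repository so the github.com/devforth/OnLogs/app/util import resolves; the sample values are arbitrary.

package main

import (
	"fmt"
	"os"

	"github.com/devforth/OnLogs/app/util"
)

func main() {
	// Mirror the default that init_config() applies when the variable is unset.
	if os.Getenv("MAX_LOGS_SIZE") == "" {
		os.Setenv("MAX_LOGS_SIZE", "10GB")
	}

	for _, s := range []string{os.Getenv("MAX_LOGS_SIZE"), "1.5G", "500MB", "1024K", "oops"} {
		n, err := util.ParseHumanReadableSize(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", n, "bytes")
	}
}

Because the parser upper-cases its input and matches by suffix, lowercase values and the shorthand units added in PATCH 05 ("T", "G", "M", "K") are accepted as well, while a bare number without a unit is rejected with an "unknown size unit" error.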