Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -88,3 +88,4 @@ Once done, just go to <your host> and login as "admin" with <any password>.
| AGENT | Toggles agent mode. If enabled, there will be no web interface available, and all logs will be sent and stored on HOST | `false` | -
| HOST | Url to OnLogs host from protocol to domain name. | | if `AGENT=true`
| ONLOGS_TOKEN | Token that an agent will use to authorize and connect to HOST | Generated with the OnLogs interface | if `AGENT=true`
| MAX_LOGS_SIZE | Maximum allowed total logs size before cleanup triggers. Accepts human-readable formats like 5GB, 500MB, 1.5GB etc. When exceeded, 10% of logs (by count) will be removed proportionally across containers starting from oldest | 10GB | -
6 changes: 3 additions & 3 deletions application/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ ADD backend/. /backend/
WORKDIR /backend/

RUN go mod download \
&& go build -o main .
&& go build -o onlogs .

FROM alpine

COPY --from=frontbuilder /code/dist/ /dist/
COPY --from=backendbuilder /backend/main /backend/main
CMD ["/backend/main"]
COPY --from=backendbuilder /backend/onlogs /backend/onlogs
CMD ["/backend/onlogs"]
221 changes: 217 additions & 4 deletions application/backend/app/containerdb/containerdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,22 @@ import (
"fmt"
"os"
"strings"
"sync"
"time"

"github.com/devforth/OnLogs/app/util"
"github.com/devforth/OnLogs/app/vars"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
leveldbUtil "github.com/syndtr/goleveldb/leveldb/util"
)

func GetLogStatusKey(message string) string {
if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") || // const statuses_errors = ["ERROR", "ERR", "Error", "Err"];
strings.Contains(message, "Error") || strings.Contains(message, "Err") {
if strings.Contains(message, "ERROR") || strings.Contains(message, "ERR") {
return "error"
} else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") { // const statuses_warnings = ["WARN", "WARNING"];
} else if strings.Contains(message, "WARN") || strings.Contains(message, "WARNING") {
return "warn"
} else if strings.Contains(message, "DEBUG") { // const statuses_other = ["DEBUG", "INFO", "ONLOGS"];
} else if strings.Contains(message, "DEBUG") {
return "debug"
} else if strings.Contains(message, "INFO") {
return "info"
Expand All @@ -28,6 +29,215 @@ func GetLogStatusKey(message string) string {
return "other"
}

func checkAndManageLogSize(host string, container string) error {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

i can't see ussage of host and container function params

maxSize, err := util.ParseHumanReadableSize(os.Getenv("MAX_LOGS_SIZE"))
if err != nil {
return fmt.Errorf("failed to parse MAX_LOGS_SIZE: %v", err)
}

for {
hosts, err := os.ReadDir("leveldb/hosts/")
if err != nil {
return fmt.Errorf("failed to read hosts directory: %v", err)
}

var totalSize int64
for _, h := range hosts {
hostName := h.Name()
containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers")
for _, c := range containers {
containerName := c.Name()
size := util.GetDirSize(hostName, containerName)
totalSize += int64(size * 1024 * 1024)
}
}

fmt.Printf("Max size: %d, current dir size: %d\n", maxSize, totalSize)
if totalSize <= maxSize {
break
}

var cutoffKeys [][]byte
for _, h := range hosts {
hostName := h.Name()
containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers")
for _, c := range containers {
containerName := c.Name()
logsDB := util.GetDB(hostName, containerName, "logs")
if logsDB == nil {
continue
}

cutoffKeysForContainer, err := getCutoffKeysForContainer(logsDB, 200)
if err != nil || len(cutoffKeysForContainer) == 0 {
continue
}
cutoffKeys = append(cutoffKeys, cutoffKeysForContainer)
}
}

if len(cutoffKeys) == 0 {
fmt.Println("Nothing to delete, cutoff keys not found.")
break
}

oldestCutoffKey := findOldestCutoffKey(cutoffKeys)
oldestTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(oldestCutoffKey)))
if err != nil {
fmt.Println("Error parsing oldest time:", err)
break
}
fmt.Println("Oldest time for deletion cutoff:", oldestTime)

for _, h := range hosts {
hostName := h.Name()
containers, _ := os.ReadDir("leveldb/hosts/" + hostName + "/containers")
for _, c := range containers {
containerName := c.Name()
logsDB := util.GetDB(hostName, containerName, "logs")
if logsDB == nil {
continue
}

batch := new(leveldb.Batch)
deletedCount := 0
iter := logsDB.NewIterator(nil, nil)

count := 0
for ok := iter.First(); ok && count < 200; ok = iter.Next() {
count++
keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key())))
if err != nil {
fmt.Println("Error parsing key time:", err)
continue
}
if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) {
batch.Delete(iter.Key())
deletedCount++
}
}
iter.Release()

if deletedCount > 0 {
err = logsDB.Write(batch, nil)
if err != nil {
fmt.Printf("Failed to delete batch in %s/%s: %v\n", hostName, containerName, err)
} else {
fmt.Printf("Deleted %d logs from %s/%s\n", deletedCount, hostName, containerName)
}
logsDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil})
}

statusesDB := util.GetDB(hostName, containerName, "statuses")
if statusesDB != nil {
batch := new(leveldb.Batch)
deletedCountStatuses := 0
iter := statusesDB.NewIterator(nil, nil)

for ok := iter.First(); ok; ok = iter.Next() {
keyTime, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(iter.Key())))
if err != nil {
fmt.Println("Error parsing key time:", err)
continue
}
if keyTime.Before(oldestTime) || keyTime.Equal(oldestTime) {
batch.Delete(iter.Key())
deletedCountStatuses++
}
}
iter.Release()

if deletedCountStatuses > 0 {
err := statusesDB.Write(batch, nil)
if err != nil {
fmt.Printf("Failed to delete batch in statusesDB for %s/%s: %v\n", hostName, containerName, err)
}
statusesDB.CompactRange(leveldbUtil.Range{Start: nil, Limit: nil})
}
}
}
}

time.Sleep(100 * time.Millisecond)
}

return nil
}

// getCutoffKeysForContainer returns a copy of the key of the limit-th oldest
// log record in db. It returns an error when the database holds fewer than
// limit records, in which case the container is too small to take part in
// cleanup. Unlike a naive approach it does not retain all limit keys — only
// the most recent one seen — keeping memory use constant.
func getCutoffKeysForContainer(db *leveldb.DB, limit int) ([]byte, error) {
	iter := db.NewIterator(nil, nil)
	defer iter.Release()

	var cutoff []byte
	count := 0
	for ok := iter.First(); ok && count < limit; ok = iter.Next() {
		count++
		// Copy the key: the iterator may reuse its buffer on the next step.
		cutoff = append(cutoff[:0], iter.Key()...)
	}

	if count < limit {
		return nil, fmt.Errorf("insufficient records to form cutoff keys")
	}
	return cutoff, nil
}

// findOldestCutoffKey returns the key with the earliest embedded timestamp
// among the given per-container cutoff keys. Keys whose timestamps fail to
// parse are reported and skipped; nil is returned when no key parses.
func findOldestCutoffKey(cutoffKeys [][]byte) []byte {
	var oldestKey []byte
	var oldestTime time.Time
	found := false

	for _, candidate := range cutoffKeys {
		ts, err := time.Parse(time.RFC3339Nano, getDateTimeFromKey(string(candidate)))
		if err != nil {
			fmt.Println("Error parsing key time:", err)
			continue
		}
		if !found || ts.Before(oldestTime) {
			oldestKey, oldestTime, found = candidate, ts, true
		}
	}
	return oldestKey
}

// State shared by MaybeScheduleCleanup to ensure that at most one log-cleanup
// goroutine is in flight at a time, with a cooldown between runs.
var (
	logCleanupMu sync.Mutex // guards nextCleanup and isCleanupRunning
	nextCleanup time.Time // earliest time the next cleanup is allowed to start
	isCleanupRunning bool // set while a cleanup goroutine is running
)

// MaybeScheduleCleanup starts an asynchronous log-size cleanup unless one is
// already running or the cooldown since the previous run has not elapsed yet.
// It returns immediately; the cleanup itself runs in a fresh goroutine, which
// re-arms the one-minute cooldown when it finishes.
func MaybeScheduleCleanup(host string, container string) {
	logCleanupMu.Lock()
	defer logCleanupMu.Unlock()

	// Bail out while a cleanup is in flight or the cooldown is still active.
	if isCleanupRunning || time.Now().Before(nextCleanup) {
		return
	}
	isCleanupRunning = true

	go func() {
		cleanupErr := checkAndManageLogSize(host, container)

		logCleanupMu.Lock()
		defer logCleanupMu.Unlock()

		isCleanupRunning = false
		nextCleanup = time.Now().Add(1 * time.Minute)

		if cleanupErr != nil {
			fmt.Printf("Log cleanup failed: %v\n", cleanupErr)
		}
	}()
}

func PutLogMessage(db *leveldb.DB, host string, container string, message_item []string) error {
if len(message_item[0]) < 30 {
fmt.Println("WARNING: got broken timestamp: ", "timestamp: "+message_item[0], "message: "+message_item[1])
Expand All @@ -37,6 +247,9 @@ func PutLogMessage(db *leveldb.DB, host string, container string, message_item [
if host == "" {
panic("Host is not mentioned!")
}

MaybeScheduleCleanup(host, container)

location := host + "/" + container
if vars.Statuses_DBs[location] == nil {
vars.Statuses_DBs[location] = util.GetDB(host, container, "statuses")
Expand Down
31 changes: 31 additions & 0 deletions application/backend/app/util/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"

Expand Down Expand Up @@ -298,3 +299,33 @@ func GetStorageData() map[string]float64 {
// time.Sleep(time.Second * 30)
// }
// }

// units maps size suffixes to byte multipliers. Longer suffixes come first so
// that e.g. "GB" is matched before the bare "B" suffix; both the two-letter
// ("GB") and single-letter ("G") spellings are supported.
var units = []struct {
	Suffix     string
	Multiplier int64
}{
	{"TB", 1024 * 1024 * 1024 * 1024},
	{"T", 1024 * 1024 * 1024 * 1024},
	{"GB", 1024 * 1024 * 1024},
	{"G", 1024 * 1024 * 1024},
	{"MB", 1024 * 1024},
	{"M", 1024 * 1024},
	{"KB", 1024},
	{"K", 1024},
	{"B", 1},
}

// ParseHumanReadableSize converts a human-readable size string such as
// "10GB", "1.5gb", "500 MB", "5k" or a bare byte count ("1024") into a number
// of bytes. Matching is case-insensitive and tolerates whitespace around and
// inside the value. Negative sizes are rejected with an error.
func ParseHumanReadableSize(sizeStr string) (int64, error) {
	sizeStr = strings.TrimSpace(strings.ToUpper(sizeStr))
	for _, unit := range units {
		if !strings.HasSuffix(sizeStr, unit.Suffix) {
			continue
		}
		// Allow whitespace between the number and its unit ("5 GB").
		numStr := strings.TrimSpace(strings.TrimSuffix(sizeStr, unit.Suffix))
		num, err := strconv.ParseFloat(numStr, 64)
		if err != nil {
			return 0, fmt.Errorf("invalid number in size: %s", numStr)
		}
		if num < 0 {
			return 0, fmt.Errorf("size must not be negative: %s", sizeStr)
		}
		return int64(num * float64(unit.Multiplier)), nil
	}
	// A bare non-negative number with no suffix is interpreted as bytes.
	if num, err := strconv.ParseFloat(sizeStr, 64); err == nil && num >= 0 {
		return int64(num), nil
	}
	return 0, fmt.Errorf("unknown size unit in: %s", sizeStr)
}
5 changes: 5 additions & 0 deletions application/backend/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@ func init_config() {
if os.Getenv("DOCKER_SOCKET_PATH") == "" {
os.Setenv("DOCKER_SOCKET_PATH", "/var/run/docker.sock")
}

if os.Getenv("MAX_LOGS_SIZE") == "" {
os.Setenv("MAX_LOGS_SIZE", "10GB")
}

fmt.Println("INFO: OnLogs configs done!")
}

Expand Down
4 changes: 1 addition & 3 deletions application/build.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,2 @@
# docker buildx create --use
docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.2" --push .
# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .)
# docker build . -t devforth/onlogs && docker push devforth/onlogs
docker buildx build --load --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.4" .
6 changes: 2 additions & 4 deletions application/frontend/src/lib/CheckBox/CheckBox.scss
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
align-items: center;
padding: 5px;
box-sizing: border-box;
justify-content: end;
justify-content: start;
cursor: pointer;
position: relative;
}
Expand All @@ -16,15 +16,13 @@
.checkboxRoll {
background-color: $active-color;
position: absolute;
// left: 0;
transform: translateX(-85%);
transform: translateX(85%);
transition: all 100ms;
}
}

.inactive {
.checkboxRoll {
right: 0;
transform: translateX(0);
transition: all 200ms;
}
Expand Down
2 changes: 1 addition & 1 deletion application/frontend/src/lib/DropDown/DropDown.scss
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
right: 100%;
transform: translate(25%);
.dropDownRawEl.text {
margin-right: 0px;
margin-right: 5px;
}
}

Expand Down
4 changes: 4 additions & 0 deletions application/release.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Release script: build the OnLogs image for amd64 and arm64 and push it to
# Docker Hub under the "latest" and version tags.
# One-time setup if no multi-arch builder exists yet:
# docker buildx create --use
docker buildx build --platform=linux/amd64,linux/arm64 --tag "devforth/onlogs:latest" --tag "devforth/onlogs:1.1.6" --push .
# Local smoke-test / single-arch alternatives:
# docker run -v /var/run/docker.sock:/var/run/docker.sock --rm -it $(docker build -q -f Dockerfile .)
# docker build . -t devforth/onlogs && docker push devforth/onlogs