Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
28a299c
update README
agge3 Nov 29, 2025
8796342
pulling recent commit and getting up to speed
whoIsStella Dec 3, 2025
5e88812
adding workflow
whoIsStella Dec 7, 2025
eabc008
workflow restrict
whoIsStella Dec 7, 2025
67dd1ef
workflow restrict
whoIsStella Dec 7, 2025
6222772
workflow restrict
whoIsStella Dec 7, 2025
96c31a7
workflow restrict
whoIsStella Dec 7, 2025
14fd06e
workflow restrict
whoIsStella Dec 7, 2025
739a7cc
workflow restrict
whoIsStella Dec 7, 2025
343bba1
workflow restrict
whoIsStella Dec 7, 2025
745e333
workflow restrict
whoIsStella Dec 7, 2025
9e397c0
workflow restrict
whoIsStella Dec 7, 2025
23825d2
workflow restrict
whoIsStella Dec 7, 2025
700b7dd
linting
whoIsStella Dec 7, 2025
956931a
style: apply ruff formatting
whoIsStella Dec 8, 2025
1a03755
adding CI/CD stuff
whoIsStella Dec 8, 2025
6aea3b7
stuff
whoIsStella Dec 8, 2025
b1f661b
stuff
whoIsStella Dec 8, 2025
815fb2c
stuff
whoIsStella Dec 8, 2025
78303df
new
whoIsStella Jan 1, 2026
7d139a0
vbn
whoIsStella Jan 1, 2026
9ded175
xcgvb
whoIsStella Jan 2, 2026
aa839fb
tests and l a u n d r y
whoIsStella Jan 2, 2026
668d5e2
df
whoIsStella Jan 2, 2026
1ff53e7
ruff ignore
whoIsStella Jan 2, 2026
b4dbee9
Cleanup imports, style, and exception handling
whoIsStella Jan 4, 2026
49a98e6
fgh
whoIsStella Jan 4, 2026
32c6467
gh
whoIsStella Jan 4, 2026
afb7002
Merge branch 'version_2.0.2' into neoStella
whoIsStella Jan 4, 2026
506dc57
Merge branch 'version_2.0.2' of https://github.com/agge3/benchr into …
whoIsStella Jan 4, 2026
e3fbd3f
Merge branch 'neoStella' of https://github.com/agge3/benchr into neoS…
whoIsStella Jan 4, 2026
41e9f32
Update
whoIsStella Jan 4, 2026
3a04bea
gvhbjn
whoIsStella Jan 4, 2026
3d48806
new
whoIsStella Jan 4, 2026
b8d2549
note
whoIsStella Jan 4, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
801 changes: 801 additions & 0 deletions .github/workflows/pr.yml

Large diffs are not rendered by default.

15 changes: 10 additions & 5 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,17 @@ backend_things/other_caddys_save_for_later/other caddy
/backend_things/data/
/backend_things/logs/
/backend_things/venv/

*migrate_problems.py
models.py
*problem_models.py
__pycache__/
/backend_things/notes.txt



frontend_things/notes.txt
*.ruff_cache
*settings.json
*.pytest_cache
*frontend_things/notes.txt
/frontend_things/
/backend_things/

node_modules/
package-lock.json
Expand All @@ -36,3 +40,4 @@ venv
data

logs
*note.md
8 changes: 8 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Pre-commit configuration: basic hygiene hooks (whitespace, EOF,
# YAML validity, large-file guard) from the upstream pre-commit-hooks repo.
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
6 changes: 2 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
# Benchr
### Benchmark your code. Compete with the world.

<image of website here>
### Benchmark your code. Compare two languages. See the difference.

---

Expand All @@ -10,7 +8,7 @@
Benchr is a real-time code benchmarking platform that lets you write, execute, and analyze code performance across multiple
languages. Submit your code and get detailed metrics including CPU cycles, instructions per cycle (IPC), cache behavior, branch
predictions, memory usage, and execution time—all running in isolated Firecracker microVMs for security and consistency. Whether
you're optimizing algorithms, comparing implementations, or competing on leaderboards, Benchr gives you the low-level insights
you're optimizing algorithms or comparing implementations, Benchr gives you the low-level insights
you need.

### Project Structure
Expand Down
21 changes: 11 additions & 10 deletions backend/IPubSub.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
from abc import ABC, abstractmethod
from typing import Callable, Optional, Dict, Set
import redis.asyncio as aioredis
import json
import asyncio
import json
import logging
from abc import ABC, abstractmethod
from typing import Dict, Optional, Set # noqa: UP035

import redis.asyncio as aioredis

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -68,7 +69,7 @@ class RedisPubSub(IPubSub):
await pubsub.publish('job_results', {'job_id': 123})
"""

_instance: Optional['RedisPubSub'] = None
_instance: Optional["RedisPubSub"] = None
_lock: asyncio.Lock = None

def __init__(self):
Expand All @@ -80,7 +81,7 @@ def __init__(self):
self._connected = False

@classmethod
async def get_instance(cls, redis_url: str = None) -> 'RedisPubSub':
async def get_instance(cls, redis_url: str = None) -> "RedisPubSub":
"""
Get singleton instance with lazy initialization

Expand All @@ -107,7 +108,7 @@ async def connect(self, redis_url: str) -> None:

self._redis = await aioredis.from_url(redis_url, decode_responses=True)
self._pubsub = self._redis.pubsub()
await self._pubsub.subscribe('job_results')
await self._pubsub.subscribe("job_results")
self._running = True
self._connected = True
self._task = asyncio.create_task(self._listen())
Expand All @@ -117,14 +118,14 @@ async def _listen(self):
"""Background listener task"""
try:
async for message in self._pubsub.listen():
if message['type'] != 'message':
if message["type"] != "message":
continue

channel = message['channel']
channel = message["channel"]
print(f"[RedisPubSub] Received on {channel}: {message['data']}")

try:
data = json.loads(message['data'])
data = json.loads(message["data"])

if channel in self._subscribers:
for queue in list(self._subscribers[channel]):
Expand Down
93 changes: 44 additions & 49 deletions backend/IQueue.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from abc import ABC, abstractmethod
import queue
from abc import abstractmethod

import redis
import os
from dotenv import load_dotenv


class IQueue:
def __init__(self, maxsize, env):
Expand Down Expand Up @@ -32,11 +34,10 @@ def hasFront(self):
def size(self):
pass


class GlobalQueue(IQueue):
def __init__(self,
maxsize = 1024
):
self._queue = IQueue(maxsize)
def __init__(self, maxsize=1024):
self._queue = queue.Queue(maxsize=maxsize)

def full(self):
return self._queue.full()
Expand All @@ -45,12 +46,12 @@ def empty(self):
return self._queue.empty()

def push(self, program_id: str):
if (self._queue.full()):
if self._queue.full():
return False

self._queue.put(program_id)
return True

def pop(self):
if self.empty():
return None
Expand All @@ -59,20 +60,16 @@ def pop(self):

def hasFront(self):
return self._queue.qsize() > 0

def size(self):
return self._queue.qsize()


class RedisQueue(IQueue):
def __init__(self,
name,
redis_url,
maxsize = 1024
):
def __init__(self, name, redis_url, maxsize=1024):
self.name = name
self.redis_url = redis_url
self.redis = None # multiclients connect to one redis instance
self.redis = None # multiclients connect to one redis instance
self.queued_key = f"{name}:queued"
self.processing_key = f"{name}:processing"
self.notify_channel = f"{name}:notify"
Expand All @@ -86,13 +83,13 @@ def init(self):
self.redis_url,
decode_responses=True,
socket_connect_timeout=5,
socket_keepalive=True
socket_keepalive=True,
)
# test connection
self.redis.ping()
print(f"REDIS: connected to redis: {self.redis_url}")
except redis.ConnectionError as e:
print(f"REDIS: ERROR: cannot connect to redis")
except redis.ConnectionError:
print("REDIS: ERROR: cannot connect to redis")
raise
except Exception as e:
print(f"REDIS: ERROR: {e}")
Expand All @@ -112,103 +109,101 @@ def empty(self):
def push(self, job_id: int):
    """Append *job_id* to the tail of the queued list.

    Returns:
        True if the job was enqueued, False when the queue is full or
        the Redis connection drops.
    """
    if self.full():
        print(f"[RedisQueue] Queue full, cannot push job {job_id}")
        return False

    try:
        # FIFO: producers push on the right, consumers pop from the left.
        self.redis.rpush(self.queued_key, job_id)
        # Best-effort wake-up for anyone subscribed to the notify channel.
        self.redis.publish(self.notify_channel, job_id)
    except redis.ConnectionError as e:
        print(f"[RedisQueue] Connection error in push(): {e}")
        return False
    return True

def pend(self, timeout=5):
    """
    Atomically claim the next queued job: move it from the queued list
    to the processing list, marking it as in-flight.

    Args:
        timeout: Seconds to block waiting for a job.

    Returns:
        job_id (int) or None on timeout or connection/parse error.
    """
    # NOTE(review): the scraped source contained duplicated argument
    # lines (merge/diff residue) inside this call; reconstructed here
    # as the single intended invocation.
    try:
        # BRPOPLPUSH: blocking right pop from queued, left push to
        # processing — a single atomic Redis operation, so a crash
        # between "claimed" and "recorded" cannot lose the job.
        result = self.redis.brpoplpush(
            self.queued_key, self.processing_key, timeout=timeout
        )

        if result:
            return int(result)
        return None

    except redis.ConnectionError as e:
        print(f"[RedisQueue] Connection error in pend(): {e}")
        return None
    except ValueError as e:
        print(f"[RedisQueue] Invalid job_id in pend(): {e}")
        return None

def pop(self, timeout=5):
    """
    Remove a completed job from the processing queue.

    Args:
        timeout: Seconds to wait for a job (blocking)

    Returns:
        job_id (int) or None if timeout/empty/connection error

    Note:
        The emptiness pre-check also talks to Redis, so it now lives
        inside the try: a dropped connection degrades to a None return
        (matching hasFront()/size()) instead of raising to the caller.
    """
    try:
        # Fast path: nothing in flight, don't block at all.
        if self.redis.llen(self.processing_key) == 0:
            return None

        # BRPOP blocks up to `timeout` seconds and returns a
        # (key, value) pair, or None on timeout.
        result = self.redis.brpop(self.processing_key, timeout=timeout)

        if result:
            _, job_id = result
            return int(job_id)
        return None

    except redis.ConnectionError as e:
        print(f"[RedisQueue] Connection error in pop(): {e}")
        return None
    except ValueError as e:
        print(f"[RedisQueue] Invalid job_id in pop(): {e}")
        return None

def hasFront(self):
    """Report whether any job is waiting in the queued list.

    Jobs sitting in the processing list are not counted as queued.

    Returns:
        True when at least one job is queued; False otherwise,
        including when Redis is unreachable.
    """
    try:
        waiting = self.redis.llen(self.queued_key)
    except redis.ConnectionError:
        return False
    return waiting > 0

def size(self):
"""
Total size = queued + processing

Returns:
Total number of jobs in both lists
"""
Expand All @@ -219,21 +214,21 @@ def size(self):
except redis.ConnectionError:
print("[RedisQueue] Connection error in size()")
return 0

def queued_size(self):
    """Return the length of the queued list only (0 on connection error)."""
    try:
        count = self.redis.llen(self.queued_key)
    except redis.ConnectionError:
        count = 0
    return count

def processing_size(self):
    """Return the length of the processing list only (0 on connection error)."""
    try:
        in_flight = self.redis.llen(self.processing_key)
    except redis.ConnectionError:
        in_flight = 0
    return in_flight

def clear(self):
"""Clear both queued and processing lists (use with caution!)"""
try:
Expand All @@ -242,7 +237,7 @@ def clear(self):
print("[RedisQueue] Queue cleared")
except redis.ConnectionError:
print("[RedisQueue] Connection error in clear()")

def peek_queued(self):
"""Look at first queued job without removing it"""
try:
Expand All @@ -252,7 +247,7 @@ def peek_queued(self):
return None
except (redis.ConnectionError, ValueError):
return None

def peek_processing(self):
"""Look at first processing job without removing it"""
try:
Expand All @@ -262,12 +257,12 @@ def peek_processing(self):
return None
except (redis.ConnectionError, ValueError):
return None

def requeue_processing(self):
"""
Move all processing jobs back to queued (recovery operation)
Use this if JobManager crashes and you want to retry jobs

Returns:
Number of jobs moved
"""
Expand All @@ -278,11 +273,11 @@ def requeue_processing(self):
if not result:
break
count += 1

if count > 0:
print(f"[RedisQueue] Requeued {count} processing jobs")
return count

except redis.ConnectionError:
print("[RedisQueue] Connection error in requeue_processing()")
return 0
Loading
Loading