diff --git a/stade/core/models/approach.py b/stade/core/models/approach.py
index 5c7aef49..4988ba14 100644
--- a/stade/core/models/approach.py
+++ b/stade/core/models/approach.py
@@ -77,3 +77,9 @@ def latest_successful_submission(self) -> Submission | None:
             .order_by('-created')
             .first()
         )
+
+    @property
+    def manuscript_url(self):
+        if self.manuscript:
+            return self.manuscript.url
+        return None
diff --git a/stade/core/rest/__init__.py b/stade/core/rest/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/stade/core/rest/serializers.py b/stade/core/rest/serializers.py
deleted file mode 100644
index ba9fcedb..00000000
--- a/stade/core/rest/serializers.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from rest_framework import serializers
-
-from stade.core.models import Challenge, Task
-
-
-class TaskSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = Task
-        fields = ['id', 'name', 'type']
-
-
-class ChallengeSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = Challenge
-        fields = ['id', 'name', 'tasks']
-
-    tasks = TaskSerializer(many=True, read_only=True)
-
-
-class LeaderboardEntrySerializer(serializers.Serializer):
-    submission_id = serializers.IntegerField(source='id')
-    approach_name = serializers.CharField(source='approach.name')
-    approach_manuscript_url = serializers.SerializerMethodField()
-    approach_uses_external_data = serializers.BooleanField(source='approach.uses_external_data')
-    overall_score = serializers.FloatField()
-    submission_created = serializers.DateTimeField(source='created')
-    team_name = serializers.CharField(source='approach.team.name')
-    team_institution_name = serializers.SerializerMethodField()
-    team_institution_url = serializers.SerializerMethodField()
-
-    def get_team_institution_url(self, submission):
-        return (
-            None
-            if not submission.approach.team.institution_url
-            else submission.approach.team.institution_url
-        )
-
-    def get_team_institution_name(self, submission):
-        return (
-            None
-            if not submission.approach.team.institution
-            else submission.approach.team.institution
-        )
-
-    def get_approach_manuscript_url(self, submission):
-        if submission.approach.manuscript:
-            return self.context['request'].build_absolute_uri(submission.approach.manuscript.url)
-        else:
-            # historical reasons, as well as the live challenge not requiring manuscripts
-            return None
diff --git a/stade/core/rest/views.py b/stade/core/rest/views.py
deleted file mode 100644
index 5b951818..00000000
--- a/stade/core/rest/views.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import logging
-
-from django.http import Http404, JsonResponse
-from django.shortcuts import get_object_or_404
-from rest_framework.decorators import api_view
-from rest_framework.pagination import LimitOffsetPagination
-from rest_framework.response import Response
-
-from stade.core.leaderboard import submissions_by_approach, submissions_by_team
-from stade.core.models import Challenge, Submission, Task
-from stade.core.rest.serializers import ChallengeSerializer, LeaderboardEntrySerializer
-
-logger = logging.getLogger(__name__)
-
-
-@api_view(['GET'])
-def challenge_detail(request, challenge_id):
-    challenge = get_object_or_404(Challenge, pk=challenge_id)
-    serializer = ChallengeSerializer(challenge)
-    return Response(serializer.data)
-
-
-@api_view(['GET'])
-def leaderboard(request, task_id, cluster):
-    if request.user.is_staff:
-        task = get_object_or_404(Task, pk=task_id)
-    else:
-        task = get_object_or_404(Task.objects.filter(scores_published=True), pk=task_id)
-
-    paginator = LimitOffsetPagination()
-    paginator.default_limit = paginator.max_limit = 200
-
-    if cluster == 'approach':
-        submission_ids = submissions_by_approach(task.id)
-    elif cluster == 'team':
-        submission_ids = submissions_by_team(task.id)
-    else:
-        raise Http404()
-
-    leaderboard_submissions = (
-        Submission.objects.select_related('approach', 'approach__team')
-        .filter(id__in=submission_ids)
-        .order_by('-overall_score', 'created')
-    )
-
-    result_page = paginator.paginate_queryset(leaderboard_submissions, request)
-    serializer = LeaderboardEntrySerializer(result_page, many=True, context={'request': request})
-
-    return paginator.get_paginated_response(serializer.data)
-
-
-@api_view(['GET'])
-def submission_scores(request, submission_id):
-    if request.user.is_staff:
-        # Remove all deferred fields, since we want the score immediately
-        submission = get_object_or_404(Submission.objects.defer(None), pk=submission_id)
-    else:
-        # Remove all deferred fields, since we want the score immediately
-        submission = get_object_or_404(
-            Submission.objects.defer(None).filter(approach__task__scores_published=True),
-            pk=submission_id,
-        )
-
-    if isinstance(submission.score, list) or submission.score is None:
-        logger.warning('Unable to serialize submission score')
-        return JsonResponse({})
-
-    return JsonResponse(submission.score)
diff --git a/stade/core/templates/leaderboards.html b/stade/core/templates/leaderboards.html
index e5e4b5ca..4a50a5d1 100644
--- a/stade/core/templates/leaderboards.html
+++ b/stade/core/templates/leaderboards.html
@@ -1,15 +1,561 @@
 {% extends "base.html" %}
+{% load static %}
 {% block extra_head %}
-
-
-
+  {# Assumed assets: Chart.js renders the score chart in the classification view #}
+  <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
+  <link rel="stylesheet" href="{% static 'css/leaderboard.css' %}">
 {% endblock %}
 
 {% block content %}
-
-
-
-
-
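+{# Server-rendered leaderboard: task selection and grouping come from the "task" and "group_by" query parameters (see leaderboard_page in core/views.py) #}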
+<div class="container">
+  <div class="card">
+    <div class="card-content">
+      <span class="card-title">
+        {{ challenge.name }} Leaderboards
+      </span>
+
+      <div class="group-by-toggle">
+        <a href="?task={{ active_task.id }}&group_by=team"
+           class="{% if group_by == 'team' %}active{% endif %}">By team</a>
+        <a href="?task={{ active_task.id }}&group_by=approach"
+           class="{% if group_by == 'approach' %}active{% endif %}">By approach</a>
+      </div>
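+      {# One tab per task with published scores; hidden tasks are visible only to staff (filtered in the view) #}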
+      {% if tasks %}
+        <div class="task-tabs">
+          {% for task in tasks %}
+            <a href="?task={{ task.id }}&group_by={{ group_by }}"
+               class="task-tab {% if task.id == active_task.id %}active{% endif %}">
+              {{ task.name }}
+            </a>
+          {% endfor %}
+        </div>
+      {% endif %}
+
+      {% if active_task and submissions %}
+        <table class="striped">
+          <thead>
+            <tr>
+              <th>
+                Rank
+                <span class="header-stat">{{ stats.total_submissions }} total</span>
+              </th>
+              <th>
+                Team
+                <span class="header-stat">{{ stats.unique_teams }} unique teams</span>
+              </th>
+              <th>Approach</th>
+              <th>Manuscript</th>
+              <th>
+                Used External Data
+                <span class="header-stat">{{ stats.used_external_data }} yes</span>
+              </th>
+              <th>
+                Primary Metric Value
+                <span class="header-stat">
+                  {% if active_task.type == 'segmentation' %}Jaccard Index{% else %}{{ active_task.get_metric_field_display }}{% endif %}
+                </span>
+              </th>
+            </tr>
+          </thead>
+          <tbody>
+            {% for submission in submissions %}
+              <tr>
+                <td>{{ forloop.counter }}</td>
+                <td>
+                  {{ submission.approach.team.name }}
+                  {% if submission.approach.team.institution %}
+                    <div class="institution">
+                      {{ submission.approach.team.institution }}
+                    </div>
+                  {% endif %}
+                </td>
+                <td>{{ submission.approach.name }}</td>
+                <td>
+                  {% if submission.approach.manuscript_url %}
+                    <a href="{{ submission.approach.manuscript_url }}" target="_blank" rel="noopener">
+                      <i class="material-icons">description</i>
+                    </a>
+                  {% else %}
+                    -
+                  {% endif %}
+                </td>
+                <td>
+                  {% if submission.approach.uses_external_data %}
+                    <div class="external-data">
+                      <i class="material-icons">public</i> Yes
+                    </div>
+                  {% else %}
+                    <div class="external-data">
+                      <i class="material-icons">public_off</i> No
+                    </div>
+                  {% endif %}
+                </td>
+                <td>
+                  {{ submission.overall_score|floatformat:3 }}
+                  {% if active_task.type != 'segmentation' %}
+                    <button class="chart-toggle" data-score="{{ submission.score_json }}"><i class="material-icons">insert_chart</i></button>
+                  {% endif %}
+                </td>
+              </tr>
+            {% endfor %}
+          </tbody>
+        </table>
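+        {# Score-breakdown chart: assumes Chart.js (loaded in extra_head) and that each data-score attribute above holds a flat metric-to-value JSON object #}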
+        {% if active_task.type == 'classification' %}
+          <div class="score-chart-container">
+            <canvas id="score-chart"></canvas>
+          </div>
+          <script>
+            // Minimal wiring: clicking a row's chart button draws that
+            // submission's per-metric scores as a bar chart.
+            let scoreChart = null;
+            document.querySelectorAll('.chart-toggle').forEach((button) => {
+              button.addEventListener('click', () => {
+                const score = JSON.parse(button.dataset.score || '{}');
+                const labels = Object.keys(score);
+                if (scoreChart) {
+                  scoreChart.destroy();
+                }
+                scoreChart = new Chart(document.getElementById('score-chart'), {
+                  type: 'bar',
+                  data: {
+                    labels: labels,
+                    datasets: [{label: 'Score', data: labels.map((key) => score[key])}],
+                  },
+                  options: {scales: {y: {min: 0, max: 1}}},
+                });
+              });
+            });
+          </script>
+        {% endif %}
+      {% elif active_task %}
+        <div class="empty-state">
+          <p>
+            No submissions found for this task.
+          </p>
+        </div>
+      {% else %}
+        <div class="empty-state">
+          <p>
+            No tasks available for this challenge.
+          </p>
+        </div>
+      {% endif %}
+    </div>
+  </div>
+</div>
 {% endblock %}
diff --git a/stade/core/tests/test_leaderboard.py b/stade/core/tests/test_leaderboard.py
index c847eb13..1e5d51d7 100644
--- a/stade/core/tests/test_leaderboard.py
+++ b/stade/core/tests/test_leaderboard.py
@@ -14,7 +14,9 @@ def task_with_submissions(db, approach_factory, submission_factory, task_factory
     - an approach where the most recent score is worse than a prior score (t1_a0)
     - a team with a better approach which has been rejected (t3)
     """
-    task = task_factory(challenge__name='Test Challenge', name='Test Task', scores_published=True)
+    task = task_factory(
+        challenge__name='Test Challenge', name='Test Task', scores_published=True, hidden=False
+    )
 
     teams = [
         team_factory(challenge=task.challenge),
@@ -66,7 +68,9 @@ def task_with_submissions(db, approach_factory, submission_factory, task_factory
 
 @pytest.mark.django_db
 def test_leaderboard_by_approach(task_with_submissions, client):
-    resp = client.get(f'/api/leaderboard/{task_with_submissions.id}/by-approach/')
+    resp = client.get(
+        f'/leaderboards/{task_with_submissions.challenge.slug}/?task={task_with_submissions.id}&group_by=approach'  # noqa: E501
+    )
     assert resp.status_code == 200
 
     # assert the by approach leaderboard looks like
@@ -74,39 +78,43 @@ def test_leaderboard_by_approach(task_with_submissions, client):
     # team0 | approach1 | .80
     # team1 | approach0 | .78
     # team3 | approach1 | .60
-    first, second, third, fourth = resp.json()['results']
-
-    assert first['team_name'] == 'team_0'
-    assert first['approach_name'] == 'approach_0'
-    assert first['overall_score'] == 0.95
-    assert second['team_name'] == 'team_0'
-    assert second['approach_name'] == 'approach_1'
-    assert second['overall_score'] == 0.80
-    assert third['team_name'] == 'team_1'
-    assert third['approach_name'] == 'approach_0'
-    assert third['overall_score'] == 0.78
-    assert fourth['team_name'] == 'team_3'
-    assert fourth['approach_name'] == 'approach_1'
-    assert fourth['overall_score'] == 0.60
+    submissions = resp.context['submissions']
+    first, second, third, fourth = submissions[:4]
+
+    assert first.approach.team.name == 'team_0'
+    assert first.approach.name == 'approach_0'
+    assert first.overall_score == 0.95
+    assert second.approach.team.name == 'team_0'
+    assert second.approach.name == 'approach_1'
+    assert second.overall_score == 0.80
+    assert third.approach.team.name == 'team_1'
+    assert third.approach.name == 'approach_0'
+    assert third.overall_score == 0.78
+    assert fourth.approach.team.name == 'team_3'
+    assert fourth.approach.name == 'approach_1'
+    assert fourth.overall_score == 0.60
 
 
 @pytest.mark.django_db
 def test_leaderboard_by_team(task_with_submissions, client):
-    resp = client.get(f'/api/leaderboard/{task_with_submissions.id}/by-team/')
+    resp = client.get(
+        f'/leaderboards/{task_with_submissions.challenge.slug}/?task={task_with_submissions.id}&group_by=team'  # noqa: E501
+    )
     assert resp.status_code == 200
 
     # assert the by team leaderboard looks like
     # team5 | approach0 | .95
     # team6 | approach0 | .78
     # team8 | approach1 | .60
-    first, second, third = resp.json()['results']
-
-    assert first['team_name'] == 'team_5'
-    assert first['approach_name'] == 'approach_0'
-    assert first['overall_score'] == 0.95
-    assert second['team_name'] == 'team_6'
-    assert second['approach_name'] == 'approach_0'
-    assert second['overall_score'] == 0.78
-    assert third['team_name'] == 'team_8'
-    assert third['approach_name'] == 'approach_1'
-    assert third['overall_score'] == 0.60
+    submissions = resp.context['submissions']
+    first, second, third = submissions[:3]
+
+    assert first.approach.team.name == 'team_5'
+    assert first.approach.name == 'approach_0'
+    assert first.overall_score == 0.95
+    assert second.approach.team.name == 'team_6'
+    assert second.approach.name == 'approach_0'
+    assert second.overall_score == 0.78
+    assert third.approach.team.name == 'team_8'
+    assert third.approach.name == 'approach_1'
+    assert third.overall_score == 0.60
diff --git a/stade/core/urls.py b/stade/core/urls.py
index c10f130b..f7ff024b 100644
--- a/stade/core/urls.py
+++ b/stade/core/urls.py
@@ -4,7 +4,6 @@
 from stade.core import views
 from stade.core.models import Challenge
-from stade.core.rest import views as api_views
 
 
 class ChallengeFromSlugConverter:
@@ -48,24 +47,6 @@ def to_url(self, value: str) -> str:
         views.request_submission_bundle,
         name='request-submission-bundle',
     ),
-    path('api/challenge/<int:challenge_id>/', api_views.challenge_detail, name='challenge-detail'),
-    path(
-        'api/leaderboard/<int:task_id>/by-approach/',
-        api_views.leaderboard,
-        {'cluster': 'approach'},
-        name='leaderboard-by-approach',
-    ),
-    path(
-        'api/leaderboard/<int:task_id>/by-team/',
-        api_views.leaderboard,
-        {'cluster': 'team'},
-        name='leaderboard-by-team',
-    ),
-    path(
-        'api/submission/<int:submission_id>/score/',
-        api_views.submission_scores,
-        name='submission-scores',
-    ),
     path('data/', TemplateView.as_view(template_name='data/base.html'), name='data'),
     path('stats/', views.stats, name='stats'),
     path('challenges/', views.challenges, name='challenges'),
diff --git a/stade/core/views.py b/stade/core/views.py
index 8ce325ac..ca21cd40 100644
--- a/stade/core/views.py
+++ b/stade/core/views.py
@@ -1,4 +1,5 @@
 from datetime import timedelta
+import json
 import logging
 
 from django.conf import settings
@@ -21,6 +22,7 @@
     CreateSubmissionForm,
     TeamForm,
 )
+from stade.core.leaderboard import submissions_by_approach, submissions_by_team
 from stade.core.models import Approach, Challenge, Submission, Task, Team, TeamInvitation
 from stade.core.tasks import generate_submission_bundle, score_submission, send_team_invitation
 from stade.core.utils import safe_redirect
@@ -33,12 +35,73 @@ def handler500(request):
 
 
 def leaderboard_page(request, challenge):
-    # Default to grouping by team for all Challenges except 'live'
-    by_team_default = challenge.slug != 'live'
+    task_id = request.GET.get('task')
+    group_by = request.GET.get('group_by')
+
+    if group_by not in ['team', 'approach']:
+        # default to grouping by team for all challenges except 'live'
+        group_by = 'team' if challenge.slug != 'live' else 'approach'
+
+    tasks = challenge.tasks.filter(scores_published=True).order_by('name')
+
+    if not request.user.is_staff:
+        tasks = tasks.filter(hidden=False)
+
+    if task_id:
+        try:
+            active_task = tasks.get(pk=task_id)
+        except (Task.DoesNotExist, ValueError):
+            # fall back when the requested task is missing, not visible, or malformed
+            active_task = tasks.first()
+    else:
+        active_task = tasks.first()
+
+    submissions = []
+    stats = {
+        'total_submissions': 0,
+        'unique_teams': 0,
+        'used_external_data': 0,
+    }
+
+    if active_task:
+        if group_by == 'team':
+            submission_ids = list(submissions_by_team(active_task.id))
+        else:
+            submission_ids = list(submissions_by_approach(active_task.id))
+
+        # leaderboards only show the top 200 results
+        submissions = list(
+            Submission.objects.defer(None)  # avoid n+1 queries on the normally deferred score field
+            .select_related('approach', 'approach__team', 'creator')
+            .filter(id__in=submission_ids)
+            .order_by('-overall_score', 'created')[:200]
+        )
+
+        # serialize each score dict for the template's chart script
+        for submission in submissions:
+            if submission.score and isinstance(submission.score, dict):
+                submission.score_json = json.dumps(submission.score)
+            else:
+                submission.score_json = '{}'
+
+        stats = {
+            'total_submissions': len(submissions),
+            'unique_teams': len(
+                {s.approach.team.name for s in submissions if s.approach and s.approach.team}
+            ),
+            'used_external_data': sum(
+                1 for s in submissions if s.approach and s.approach.uses_external_data
+            ),
+        }
+
     return render(
         request,
         'leaderboards.html',
-        {'challenge': challenge, 'by_team_default': by_team_default},
+        {
+            'challenge': challenge,
+            'tasks': tasks,
+            'active_task': active_task,
+            'submissions': submissions,
+            'group_by': group_by,
+            'stats': stats,
+        },
     )