diff --git a/app/grandchallenge/challenges/forms.py b/app/grandchallenge/challenges/forms.py
index d9c11e3688..42d8db7aa3 100644
--- a/app/grandchallenge/challenges/forms.py
+++ b/app/grandchallenge/challenges/forms.py
@@ -16,6 +16,8 @@
 from django_select2.forms import Select2MultipleWidget
 
 from grandchallenge.challenges.models import Challenge, ChallengeRequest
+from grandchallenge.components.models import GPUTypeChoices
+from grandchallenge.components.schemas import get_default_gpu_type_choices
 from grandchallenge.core.widgets import MarkdownEditorInlineWidget
 from grandchallenge.subdomains.utils import reverse_lazy
 
@@ -45,6 +47,8 @@
     "registration_page_markdown",
 )
 
+HTMX_BLANK_CHOICE_KEY = "__HTMX_BLANK_CHOICE_KEY__"
+
 
 class ChallengeUpdateForm(forms.ModelForm):
     def __init__(self, *args, **kwargs):
@@ -108,47 +112,6 @@ def clean(self):
         return cleaned_data
 
 
-class ChallengeRequestBudgetFieldValidationMixin:
-    def clean(self):
-        cleaned_data = super().clean()
-        if (
-            "average_size_of_test_image_in_mb" not in cleaned_data.keys()
-            or not cleaned_data["average_size_of_test_image_in_mb"]
-        ):
-            raise ValidationError(
-                "Please provide the average test image size."
-            )
-        if (
-            "inference_time_limit_in_minutes" not in cleaned_data.keys()
-            or not cleaned_data["inference_time_limit_in_minutes"]
-        ):
-            raise ValidationError("Please provide an inference time limit.")
-        if (
-            "phase_1_number_of_submissions_per_team" not in cleaned_data.keys()
-            or "phase_2_number_of_submissions_per_team"
-            not in cleaned_data.keys()
-            or cleaned_data["phase_1_number_of_submissions_per_team"] is None
-            or cleaned_data["phase_2_number_of_submissions_per_team"] is None
-        ):
-            raise ValidationError(
-                "Please provide the number of "
-                "submissions per team for each phase. Enter 0 for phase 2 "
-                "if you only have 1 phase."
-            )
-        if (
-            "phase_1_number_of_test_images" not in cleaned_data.keys()
-            or "phase_2_number_of_test_images" not in cleaned_data.keys()
-            or cleaned_data["phase_1_number_of_test_images"] is None
-            or cleaned_data["phase_2_number_of_test_images"] is None
-        ):
-            raise ValidationError(
-                "Please provide the number of "
-                "test images for each phase. Enter 0 for phase 2 if you "
-                "only have 1 phase."
-            )
-        return cleaned_data
-
-
 general_information_items_1 = (
     "title",
     "short_name",
@@ -189,9 +152,23 @@ def clean(self):
         )
 
 
-class ChallengeRequestForm(
-    ChallengeRequestBudgetFieldValidationMixin, forms.ModelForm
-):
+class ChallengeRequestForm(forms.ModelForm):
+    algorithm_selectable_gpu_type_choices = forms.MultipleChoiceField(
+        initial=get_default_gpu_type_choices(),
+        choices=[
+            (choice.value, choice.label)
+            for choice in [
+                GPUTypeChoices.NO_GPU,
+                GPUTypeChoices.T4,
+                GPUTypeChoices.A10G,
+            ]
+        ],
+        widget=forms.CheckboxSelectMultiple,
+        label="Selectable GPU types for algorithm jobs",
+        help_text="The GPU type choices that participants will be able to select for "
+        "their algorithm inference jobs.",
+    )
+
     class Meta:
         model = ChallengeRequest
         fields = (
@@ -203,6 +180,8 @@ class Meta:
             "number_of_tasks",
             "average_size_of_test_image_in_mb",
             "inference_time_limit_in_minutes",
+            "algorithm_selectable_gpu_type_choices",
+            "algorithm_maximum_settable_memory_gb",
             "algorithm_inputs",
             "algorithm_outputs",
             *phase_1_items,
@@ -260,6 +239,7 @@ class Meta:
             "phase_2_number_of_submissions_per_team": "Expected number of submissions per team to Phase 2",
             "budget_for_hosting_challenge": "Budget for hosting challenge in Euros",
             "inference_time_limit_in_minutes": "Average algorithm job run time in minutes",
+            "algorithm_maximum_settable_memory_gb": "Maximum memory for algorithm jobs in GB",
             "structured_challenge_submission_doi": "DOI",
             "structured_challenge_submission_form": "PDF",
             "challenge_fee_agreement": format_html(
@@ -361,7 +341,8 @@ class Meta:
             "phase_2_number_of_test_images": (
                 "Number of test images for this phase. If you're "
-                "bundling images, enter the number of batches (not the number of single images)."
+                "bundling images, enter the number of batches (not the number of single images). "
+                "Enter 0 here if you only have one phase."
             ),
             "average_size_of_test_image_in_mb": (
                 "Average size of test image in MB."
-            ),
-        }
+            ),
+            "inference_time_limit_in_minutes": (
+                "Average run time per algorithm job to process one single image, including "
+                "model loading, i/o, preprocessing and inference."
+            ),
+        }
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        if "" in (
+            initial := self.instance.algorithm_selectable_gpu_type_choices
+        ):
+            initial[initial.index("")] = HTMX_BLANK_CHOICE_KEY
+        self.fields["algorithm_selectable_gpu_type_choices"].initial = (
+            initial
+        )
         self.helper = FormHelper(self)
         self.helper.form_id = "budget"
         self.helper.attrs.update(
@@ -635,3 +640,11 @@ def __init__(self, *args, **kwargs):
             }
         )
         self.helper.layout.append(Submit("save", "Save"))
+
+    def clean_algorithm_selectable_gpu_type_choices(self):
+        data = self.cleaned_data.get(
+            "algorithm_selectable_gpu_type_choices", []
+        )
+        if HTMX_BLANK_CHOICE_KEY in data:
+            data[data.index(HTMX_BLANK_CHOICE_KEY)] = ""
+        return data
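The HTMX_BLANK_CHOICE_KEY sentinel exists because GPUTypeChoices encodes "no GPU" as the empty string, and an empty string used as a checkbox value does not reliably survive the HTMX request/Django form round-trip. A minimal sketch of the round-trip, in plain Python (the helper functions are illustrative, not part of the patch):

    HTMX_BLANK_CHOICE_KEY = "__HTMX_BLANK_CHOICE_KEY__"

    def to_form(stored):
        # Applied in __init__: map the stored "" to the sentinel for rendering.
        return [HTMX_BLANK_CHOICE_KEY if v == "" else v for v in stored]

    def from_form(submitted):
        # Applied in clean_algorithm_selectable_gpu_type_choices: map it back.
        return ["" if v == HTMX_BLANK_CHOICE_KEY else v for v in submitted]

    assert from_form(to_form(["", "A10G", "T4"])) == ["", "A10G", "T4"]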
" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="average_size_of_test_image_in_mb", + field=models.PositiveIntegerField( + help_text="Average size of a test image in MB.", + validators=[ + django.core.validators.MinValueValidator(limit_value=1), + django.core.validators.MaxValueValidator( + limit_value=10000 + ), + ], + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="budget_for_hosting_challenge", + field=models.PositiveIntegerField( + default=0, + help_text="What is your budget for hosting this challenge? Please be reminded of our challenge pricing policy.", + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="data_license", + field=models.BooleanField(default=True), + ), + migrations.AlterField( + model_name="challengerequest", + name="inference_time_limit_in_minutes", + field=models.PositiveIntegerField( + help_text="Average run time per algorithm job in minutes.", + validators=[ + django.core.validators.MinValueValidator(limit_value=5), + django.core.validators.MaxValueValidator(limit_value=60), + ], + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="long_term_commitment", + field=models.BooleanField(default=True), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_1_number_of_submissions_per_team", + field=models.PositiveIntegerField( + help_text="How many submissions do you expect per team in this phase?" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_1_number_of_test_images", + field=models.PositiveIntegerField( + help_text="Number of test images for this phase." + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_2_number_of_submissions_per_team", + field=models.PositiveIntegerField( + help_text="How many submissions do you expect per team in this phase?" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_2_number_of_test_images", + field=models.PositiveIntegerField( + help_text="Number of test images for this phase." + ), + ), + ] diff --git a/app/grandchallenge/challenges/models.py b/app/grandchallenge/challenges/models.py index 46e5703b2d..366262fd3b 100644 --- a/app/grandchallenge/challenges/models.py +++ b/app/grandchallenge/challenges/models.py @@ -45,7 +45,11 @@ send_challenge_requested_email_to_reviewers, ) from grandchallenge.challenges.utils import ChallengeTypeChoices -from grandchallenge.components.schemas import GPUTypeChoices +from grandchallenge.components.schemas import ( + SELECTABLE_GPU_TYPES_SCHEMA, + GPUTypeChoices, + get_default_gpu_type_choices, +) from grandchallenge.core.models import UUIDModel from grandchallenge.core.storage import ( get_banner_path, @@ -59,6 +63,7 @@ ) from grandchallenge.core.validators import ( ExtensionValidator, + JSONValidator, MimeTypeValidator, ) from grandchallenge.evaluation.tasks import assign_evaluation_permissions @@ -898,17 +903,29 @@ class ChallengeRequestStatusChoices(models.TextChoices): validators=[MinValueValidator(limit_value=1)], ) inference_time_limit_in_minutes = models.PositiveIntegerField( - blank=True, - null=True, help_text="Average run time per algorithm job in minutes.", validators=[ MinValueValidator(limit_value=5), MaxValueValidator(limit_value=60), ], ) + algorithm_selectable_gpu_type_choices = models.JSONField( + default=get_default_gpu_type_choices, + help_text=( + "The GPU type choices that participants will be able to select for their " + "algorithm inference jobs. 
diff --git a/app/grandchallenge/challenges/models.py b/app/grandchallenge/challenges/models.py
index 46e5703b2d..366262fd3b 100644
--- a/app/grandchallenge/challenges/models.py
+++ b/app/grandchallenge/challenges/models.py
@@ -45,7 +45,11 @@
     send_challenge_requested_email_to_reviewers,
 )
 from grandchallenge.challenges.utils import ChallengeTypeChoices
-from grandchallenge.components.schemas import GPUTypeChoices
+from grandchallenge.components.schemas import (
+    SELECTABLE_GPU_TYPES_SCHEMA,
+    GPUTypeChoices,
+    get_default_gpu_type_choices,
+)
 from grandchallenge.core.models import UUIDModel
 from grandchallenge.core.storage import (
     get_banner_path,
@@ -59,6 +63,7 @@
 )
 from grandchallenge.core.validators import (
     ExtensionValidator,
+    JSONValidator,
     MimeTypeValidator,
 )
 from grandchallenge.evaluation.tasks import assign_evaluation_permissions
@@ -898,17 +903,29 @@ class ChallengeRequestStatusChoices(models.TextChoices):
         validators=[MinValueValidator(limit_value=1)],
     )
     inference_time_limit_in_minutes = models.PositiveIntegerField(
-        blank=True,
-        null=True,
         help_text="Average run time per algorithm job in minutes.",
         validators=[
            MinValueValidator(limit_value=5),
            MaxValueValidator(limit_value=60),
        ],
     )
+    algorithm_selectable_gpu_type_choices = models.JSONField(
+        default=get_default_gpu_type_choices,
+        help_text=(
+            "The GPU type choices that participants will be able to select for their "
+            "algorithm inference jobs. Options are "
+            f"{GPUTypeChoices.values}.".replace("'", '"')
+        ),
+        validators=[JSONValidator(schema=SELECTABLE_GPU_TYPES_SCHEMA)],
+    )
+    algorithm_maximum_settable_memory_gb = models.PositiveSmallIntegerField(
+        default=settings.ALGORITHMS_MAX_MEMORY_GB,
+        help_text=(
+            "Maximum amount of memory that participants will be allowed to "
+            "assign to algorithm inference jobs for submission."
+        ),
+    )
     average_size_of_test_image_in_mb = models.PositiveIntegerField(
-        null=True,
-        blank=True,
         help_text="Average size of a test image in MB.",
         validators=[
             MinValueValidator(limit_value=1),
@@ -916,23 +933,15 @@ class ChallengeRequestStatusChoices(models.TextChoices):
         ],
     )
     phase_1_number_of_submissions_per_team = models.PositiveIntegerField(
-        null=True,
-        blank=True,
         help_text="How many submissions do you expect per team in this phase?",
     )
     phase_2_number_of_submissions_per_team = models.PositiveIntegerField(
-        null=True,
-        blank=True,
         help_text="How many submissions do you expect per team in this phase?",
     )
     phase_1_number_of_test_images = models.PositiveIntegerField(
-        null=True,
-        blank=True,
         help_text="Number of test images for this phase.",
     )
     phase_2_number_of_test_images = models.PositiveIntegerField(
-        null=True,
-        blank=True,
         help_text="Number of test images for this phase.",
     )
     number_of_tasks = models.PositiveIntegerField(
@@ -943,21 +952,17 @@ class ChallengeRequestStatusChoices(models.TextChoices):
     )
     budget_for_hosting_challenge = models.PositiveIntegerField(
         default=0,
-        null=True,
-        blank=True,
         help_text="What is your budget for hosting this challenge? Please be reminded of our challenge pricing policy.",
     )
     long_term_commitment = models.BooleanField(
-        null=True,
-        blank=True,
+        default=True,
     )
     long_term_commitment_extra = models.CharField(
         max_length=2000,
         blank=True,
     )
     data_license = models.BooleanField(
-        null=True,
-        blank=True,
+        default=True,
     )
     data_license_extra = models.CharField(
         max_length=2000,
@@ -968,7 +973,6 @@ class ChallengeRequestStatusChoices(models.TextChoices):
         help_text="If you have any comments, remarks or questions, please leave them here.",
     )
     algorithm_inputs = models.TextField(
-        blank=True,
         help_text="What are the inputs to the algorithms submitted as solutions to "
         "your challenge going to be? "
         "Please describe in detail "
         "what the input(s) reflect(s), for example, "
         "MRI scan of the brain, or chest X-ray. Grand Challenge only "
         "supports .mha and .tiff image files and json files for algorithms.",
     )
     algorithm_outputs = models.TextField(
-        blank=True,
         help_text="What are the outputs to the algorithms submitted as solutions to "
         "your challenge going to be? "
         "Please describe in detail what the output(s) "
         "reflect(s), for example, "
         "probability of a positive PCR result, or stroke lesion segmentation. ",
     )
@@ -1049,6 +1052,8 @@ def budget_fields(self):
         "expected_number_of_teams",
         "number_of_tasks",
         "inference_time_limit_in_minutes",
+        "algorithm_selectable_gpu_type_choices",
+        "algorithm_maximum_settable_memory_gb",
         "average_size_of_test_image_in_mb",
         "phase_1_number_of_submissions_per_team",
         "phase_1_number_of_test_images",
@@ -1147,17 +1152,22 @@ def total_data_and_docker_storage_bytes(self):
             + self.phase_2_storage_size_bytes
         )
 
-    @property
+    @cached_property
     def compute_euro_cents_per_hour(self):
-        executor = import_string(settings.COMPONENTS_DEFAULT_BACKEND)(
-            job_id="",
-            exec_image_repo_tag="",
-            # Assume these options picked by the participant
-            memory_limit=32,
-            time_limit=self.inference_time_limit_in_minutes,
-            requires_gpu_type=GPUTypeChoices.T4,
+        executors = [
+            import_string(settings.COMPONENTS_DEFAULT_BACKEND)(
+                job_id="",
+                exec_image_repo_tag="",
+                memory_limit=self.algorithm_maximum_settable_memory_gb,
+                time_limit=self.inference_time_limit_in_minutes,
+                requires_gpu_type=gpu_type,
+            )
+            for gpu_type in self.algorithm_selectable_gpu_type_choices
+        ]
+        usd_cents_per_hour = max(
+            executor.usd_cents_per_hour for executor in executors
         )
-        return executor.usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR
+        return usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR
 
     def get_compute_costs_euros(self, duration):
         return self.round_to_10_euros(
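This costing change is the core of the patch: instead of pricing every budget against a fixed 32 GB / T4 assumption, compute_euro_cents_per_hour now budgets against the most expensive configuration a participant could actually pick. A minimal sketch of that logic with made-up rates (the real figures come from the configured COMPONENTS_DEFAULT_BACKEND executor, e.g. SageMaker):

    ASSUMED_USD_CENTS_PER_HOUR = {"": 40, "T4": 160, "A10G": 210}  # illustrative only
    USD_TO_EUR = 0.92  # stands in for settings.COMPONENTS_USD_TO_EUR

    def compute_euro_cents_per_hour(selectable_gpu_types):
        # Budget for the priciest GPU type the participant may select.
        usd_cents = max(ASSUMED_USD_CENTS_PER_HOUR[g] for g in selectable_gpu_types)
        return usd_cents * USD_TO_EUR

    # Allowing A10G raises the hourly budget compared to a CPU-only request,
    # which is why the expected surpluses change in the tests below.
    assert compute_euro_cents_per_hour(["", "A10G"]) > compute_euro_cents_per_hour([""])

The switch from @property to @cached_property also means the per-GPU-type executors are only instantiated once per request object rather than on every access.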
" "Please describe in detail what the output(s) " @@ -1049,6 +1052,8 @@ def budget_fields(self): "expected_number_of_teams", "number_of_tasks", "inference_time_limit_in_minutes", + "algorithm_selectable_gpu_type_choices", + "algorithm_maximum_settable_memory_gb", "average_size_of_test_image_in_mb", "phase_1_number_of_submissions_per_team", "phase_1_number_of_test_images", @@ -1147,17 +1152,22 @@ def total_data_and_docker_storage_bytes(self): + self.phase_2_storage_size_bytes ) - @property + @cached_property def compute_euro_cents_per_hour(self): - executor = import_string(settings.COMPONENTS_DEFAULT_BACKEND)( - job_id="", - exec_image_repo_tag="", - # Assume these options picked by the participant - memory_limit=32, - time_limit=self.inference_time_limit_in_minutes, - requires_gpu_type=GPUTypeChoices.T4, + executors = [ + import_string(settings.COMPONENTS_DEFAULT_BACKEND)( + job_id="", + exec_image_repo_tag="", + memory_limit=self.algorithm_maximum_settable_memory_gb, + time_limit=self.inference_time_limit_in_minutes, + requires_gpu_type=gpu_type, + ) + for gpu_type in self.algorithm_selectable_gpu_type_choices + ] + usd_cents_per_hour = max( + executor.usd_cents_per_hour for executor in executors ) - return executor.usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR + return usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR def get_compute_costs_euros(self, duration): return self.round_to_10_euros( diff --git a/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html b/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html index eacdfd2a19..2cc0e7af36 100644 --- a/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html +++ b/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html @@ -8,7 +8,9 @@ diff --git a/app/grandchallenge/evaluation/forms.py b/app/grandchallenge/evaluation/forms.py index e75a7e5b08..1bef795ad3 100644 --- a/app/grandchallenge/evaluation/forms.py +++ b/app/grandchallenge/evaluation/forms.py @@ -754,6 +754,14 @@ class ConfigureAlgorithmPhasesForm(SaveFormInitMixin, Form): widget=forms.HiddenInput(), disabled=True, ) + algorithm_selectable_gpu_type_choices = forms.JSONField( + widget=forms.HiddenInput(), + disabled=True, + ) + algorithm_maximum_settable_memory_gb = forms.IntegerField( + widget=forms.HiddenInput(), + disabled=True, + ) def __init__(self, *args, challenge, **kwargs): super().__init__(*args, **kwargs) @@ -772,10 +780,29 @@ def __init__(self, *args, challenge, **kwargs): challenge_request = ChallengeRequest.objects.get( short_name=challenge.short_name ) + except ObjectDoesNotExist: + challenge_request = None + if challenge_request: + self.fields["algorithm_selectable_gpu_type_choices"].initial = ( + challenge_request.algorithm_selectable_gpu_type_choices + ) + self.fields["algorithm_maximum_settable_memory_gb"].initial = ( + challenge_request.algorithm_maximum_settable_memory_gb + ) self.fields["algorithm_time_limit"].initial = ( challenge_request.inference_time_limit_in_minutes * 60 ) - except ObjectDoesNotExist: + else: + self.fields["algorithm_selectable_gpu_type_choices"].initial = ( + Phase._meta.get_field( + "algorithm_selectable_gpu_type_choices" + ).get_default() + ) + self.fields["algorithm_maximum_settable_memory_gb"].initial = ( + Phase._meta.get_field( + "algorithm_maximum_settable_memory_gb" + ).get_default() + ) self.fields["algorithm_time_limit"].initial = ( Phase._meta.get_field("algorithm_time_limit").get_default() ) diff --git 
diff --git a/app/grandchallenge/evaluation/views/__init__.py b/app/grandchallenge/evaluation/views/__init__.py
index 801623d0cd..27dc75ee2c 100644
--- a/app/grandchallenge/evaluation/views/__init__.py
+++ b/app/grandchallenge/evaluation/views/__init__.py
@@ -1045,6 +1045,12 @@ def form_valid(self, form):
             inputs=form.cleaned_data["algorithm_inputs"],
             outputs=form.cleaned_data["algorithm_outputs"],
             algorithm_time_limit=form.cleaned_data["algorithm_time_limit"],
+            algorithm_selectable_gpu_type_choices=form.cleaned_data[
+                "algorithm_selectable_gpu_type_choices"
+            ],
+            algorithm_maximum_settable_memory_gb=form.cleaned_data[
+                "algorithm_maximum_settable_memory_gb"
+            ],
         )
         messages.success(self.request, "Phases were successfully updated")
         return super().form_valid(form)
@@ -1055,7 +1061,14 @@ def get_success_url(self):
         )
 
     def turn_phase_into_algorithm_phase(
-        self, *, phase, inputs, outputs, algorithm_time_limit
+        self,
+        *,
+        phase,
+        inputs,
+        outputs,
+        algorithm_time_limit,
+        algorithm_selectable_gpu_type_choices,
+        algorithm_maximum_settable_memory_gb,
     ):
         archive = Archive.objects.create(
             title=format_html(
@@ -1079,6 +1092,12 @@ def turn_phase_into_algorithm_phase(
             archive.add_editor(user)
 
         phase.algorithm_time_limit = algorithm_time_limit
+        phase.algorithm_selectable_gpu_type_choices = (
+            algorithm_selectable_gpu_type_choices
+        )
+        phase.algorithm_maximum_settable_memory_gb = (
+            algorithm_maximum_settable_memory_gb
+        )
         phase.archive = archive
         phase.submission_kind = phase.SubmissionKindChoices.ALGORITHM
         phase.creator_must_be_verified = True
diff --git a/app/tests/challenges_tests/test_forms.py b/app/tests/challenges_tests/test_forms.py
index 07ff0d4b28..26d1ac5286 100644
--- a/app/tests/challenges_tests/test_forms.py
+++ b/app/tests/challenges_tests/test_forms.py
@@ -3,6 +3,7 @@
 import pytest
 
 from grandchallenge.challenges.forms import (
+    HTMX_BLANK_CHOICE_KEY,
     ChallengeRequestBudgetUpdateForm,
     ChallengeRequestForm,
     ChallengeRequestStatusUpdateForm,
@@ -36,6 +37,7 @@ def test_challenge_request_budget_fields_required():
         "expected_number_of_teams": 10,
         "number_of_tasks": 1,
         "challenge_fee_agreement": True,
+        "budget_for_hosting_challenge": 0,
     }
     form = ChallengeRequestForm(data=data, creator=user)
     assert not form.is_valid()
@@ -58,10 +60,13 @@ def test_challenge_request_budget_fields_required():
         "expected_number_of_teams": 10,
         "number_of_tasks": 1,
         "challenge_fee_agreement": True,
+        "budget_for_hosting_challenge": 0,
         "algorithm_inputs": "foo",
         "algorithm_outputs": "foo",
         "average_size_of_test_image_in_mb": 1,
         "inference_time_limit_in_minutes": 11,
+        "algorithm_selectable_gpu_type_choices": ["", "A10G", "T4"],
+        "algorithm_maximum_settable_memory_gb": 32,
         "phase_1_number_of_submissions_per_team": 1,
         "phase_2_number_of_submissions_per_team": 1,
         "phase_1_number_of_test_images": 1,
@@ -105,11 +110,17 @@ def test_budget_update_form():
         data=data, instance=challenge_request
     )
     assert not form.is_valid()
-    assert "Please provide an inference time limit." in str(form.errors)
+    assert "inference_time_limit_in_minutes" in form.errors.keys()
 
     data2 = {
         "expected_number_of_teams": 100,
         "inference_time_limit_in_minutes": 10,
+        "algorithm_selectable_gpu_type_choices": [
+            HTMX_BLANK_CHOICE_KEY,
+            "A10G",
+            "T4",
+        ],
+        "algorithm_maximum_settable_memory_gb": 32,
         "average_size_of_test_image_in_mb": 10,
         "phase_1_number_of_submissions_per_team": 10,
         "phase_2_number_of_submissions_per_team": 1,
diff --git a/app/tests/challenges_tests/test_models.py b/app/tests/challenges_tests/test_models.py
index ca3afb6bef..7552ce8f46 100644
--- a/app/tests/challenges_tests/test_models.py
+++ b/app/tests/challenges_tests/test_models.py
@@ -109,7 +109,9 @@ def test_is_active_until_set():
 
 
 @pytest.mark.django_db
-def test_total_challenge_cost():
+def test_total_challenge_cost(settings):
+    settings.COMPONENTS_DEFAULT_BACKEND = "grandchallenge.components.backends.amazon_sagemaker_training.AmazonSageMakerTrainingExecutor"
+
     user_exempt_from_base_cost, normal_user = UserFactory.create_batch(2)
     request1 = ChallengeRequestFactory(
         creator=user_exempt_from_base_cost, expected_number_of_teams=3
@@ -120,19 +122,27 @@
     request3 = ChallengeRequestFactory(
         creator=normal_user, expected_number_of_teams=10
     )
+    request4 = ChallengeRequestFactory(
+        creator=normal_user,
+        expected_number_of_teams=10,
+        algorithm_selectable_gpu_type_choices=["", "A10G", "T4"],
+    )
     organisation = OrganizationFactory(exempt_from_base_costs=True)
     organisation.members_group.user_set.add(user_exempt_from_base_cost)
 
-    assert request1.storage_and_compute_cost_surplus == -300
+    assert request1.storage_and_compute_cost_surplus == -270
     assert request1.total_challenge_cost == 1000
 
-    assert request2.storage_and_compute_cost_surplus == -300
+    assert request2.storage_and_compute_cost_surplus == -270
     assert request2.total_challenge_cost == 6000
 
-    assert request3.storage_and_compute_cost_surplus == 1290
+    assert request3.storage_and_compute_cost_surplus == 1380
     assert request3.total_challenge_cost == 7500
 
+    assert request4.storage_and_compute_cost_surplus == 2580
+    assert request4.total_challenge_cost == 9000
+
 
 @pytest.mark.django_db
 def test_storage_and_compute_cost_add_up_to_total():
diff --git a/app/tests/challenges_tests/test_views.py b/app/tests/challenges_tests/test_views.py
index 2ae786e8d7..16d873720a 100644
--- a/app/tests/challenges_tests/test_views.py
+++ b/app/tests/challenges_tests/test_views.py
@@ -6,6 +6,7 @@
 from django.utils.timezone import now
 from guardian.shortcuts import assign_perm
 
+from grandchallenge.challenges.forms import HTMX_BLANK_CHOICE_KEY
 from grandchallenge.challenges.models import Challenge, ChallengeRequest
 from grandchallenge.invoices.models import PaymentStatusChoices
 from grandchallenge.verifications.models import Verification
@@ -385,6 +386,12 @@ def test_budget_field_update(client, challenge_reviewer):
         data={
             "expected_number_of_teams": 500,
             "inference_time_limit_in_minutes": 10,
+            "algorithm_selectable_gpu_type_choices": [
+                HTMX_BLANK_CHOICE_KEY,
+                "A10G",
+                "T4",
+            ],
+            "algorithm_maximum_settable_memory_gb": 32,
             "average_size_of_test_image_in_mb": 10,
             "phase_1_number_of_submissions_per_team": 10,
             "phase_2_number_of_submissions_per_team": 1,
diff --git a/app/tests/evaluation_tests/test_views.py b/app/tests/evaluation_tests/test_views.py
index 5cf4c71ed0..606eb3b88d 100644
--- a/app/tests/evaluation_tests/test_views.py
+++ b/app/tests/evaluation_tests/test_views.py
@@ -1391,6 +1391,14 @@ def test_configure_algorithm_phases_view(client):
         phase.algorithm_time_limit
         == challenge_request.inference_time_limit_in_minutes * 60
     )
+    assert (
+        phase.algorithm_selectable_gpu_type_choices
+        == challenge_request.algorithm_selectable_gpu_type_choices
+    )
+    assert (
+        phase.algorithm_maximum_settable_memory_gb
+        == challenge_request.algorithm_maximum_settable_memory_gb
+    )
 
 
 @pytest.mark.django_db