diff --git a/app/grandchallenge/challenges/forms.py b/app/grandchallenge/challenges/forms.py
index d9c11e3688..42d8db7aa3 100644
--- a/app/grandchallenge/challenges/forms.py
+++ b/app/grandchallenge/challenges/forms.py
@@ -16,6 +16,10 @@
 from django_select2.forms import Select2MultipleWidget
 
 from grandchallenge.challenges.models import Challenge, ChallengeRequest
+from grandchallenge.components.schemas import (
+    GPUTypeChoices,
+    get_default_gpu_type_choices,
+)
 from grandchallenge.core.widgets import MarkdownEditorInlineWidget
 from grandchallenge.subdomains.utils import reverse_lazy
 
@@ -45,6 +49,8 @@
     "registration_page_markdown",
 )
 
+HTMX_BLANK_CHOICE_KEY = "__HTMX_BLANK_CHOICE_KEY__"
+
 
 class ChallengeUpdateForm(forms.ModelForm):
     def __init__(self, *args, **kwargs):
@@ -108,47 +114,6 @@ def clean(self):
         return cleaned_data
 
 
-class ChallengeRequestBudgetFieldValidationMixin:
-    def clean(self):
-        cleaned_data = super().clean()
-        if (
-            "average_size_of_test_image_in_mb" not in cleaned_data.keys()
-            or not cleaned_data["average_size_of_test_image_in_mb"]
-        ):
-            raise ValidationError(
-                "Please provide the average test image size."
-            )
-        if (
-            "inference_time_limit_in_minutes" not in cleaned_data.keys()
-            or not cleaned_data["inference_time_limit_in_minutes"]
-        ):
-            raise ValidationError("Please provide an inference time limit.")
-        if (
-            "phase_1_number_of_submissions_per_team" not in cleaned_data.keys()
-            or "phase_2_number_of_submissions_per_team"
-            not in cleaned_data.keys()
-            or cleaned_data["phase_1_number_of_submissions_per_team"] is None
-            or cleaned_data["phase_2_number_of_submissions_per_team"] is None
-        ):
-            raise ValidationError(
-                "Please provide the number of "
-                "submissions per team for each phase. Enter 0 for phase 2 "
-                "if you only have 1 phase."
-            )
-        if (
-            "phase_1_number_of_test_images" not in cleaned_data.keys()
-            or "phase_2_number_of_test_images" not in cleaned_data.keys()
-            or cleaned_data["phase_1_number_of_test_images"] is None
-            or cleaned_data["phase_2_number_of_test_images"] is None
-        ):
-            raise ValidationError(
-                "Please provide the number of "
-                "test images for each phase. Enter 0 for phase 2 if you "
-                "only have 1 phase."
-            )
-        return cleaned_data
-
-
 general_information_items_1 = (
     "title",
     "short_name",
@@ -189,9 +154,23 @@ def clean(self):
 )
 
 
-class ChallengeRequestForm(
-    ChallengeRequestBudgetFieldValidationMixin, forms.ModelForm
-):
+class ChallengeRequestForm(forms.ModelForm):
+    algorithm_selectable_gpu_type_choices = forms.MultipleChoiceField(
+        initial=get_default_gpu_type_choices(),
+        choices=[
+            (choice.value, choice.label)
+            for choice in [
+                GPUTypeChoices.NO_GPU,
+                GPUTypeChoices.T4,
+                GPUTypeChoices.A10G,
+            ]
+        ],
+        widget=forms.CheckboxSelectMultiple,
+        label="Selectable GPU types for algorithm jobs",
+        help_text="The GPU type choices that participants will be able to select for "
+        "their algorithm inference jobs.",
+    )
+
     class Meta:
         model = ChallengeRequest
         fields = (
@@ -203,6 +182,8 @@ class Meta:
             "number_of_tasks",
             "average_size_of_test_image_in_mb",
             "inference_time_limit_in_minutes",
+            "algorithm_selectable_gpu_type_choices",
+            "algorithm_maximum_settable_memory_gb",
             "algorithm_inputs",
             "algorithm_outputs",
             *phase_1_items,
@@ -260,6 +241,7 @@ class Meta:
             "phase_2_number_of_submissions_per_team": "Expected number of submissions per team to Phase 2",
             "budget_for_hosting_challenge": "Budget for hosting challenge in Euros",
             "inference_time_limit_in_minutes": "Average algorithm job run time in minutes",
+            "algorithm_maximum_settable_memory_gb": "Maximum memory for algorithm jobs in GB",
             "structured_challenge_submission_doi": "DOI",
             "structured_challenge_submission_form": "PDF",
             "challenge_fee_agreement": format_html(
@@ -361,7 +343,8 @@
             "phase_2_number_of_test_images": (
                 "Number of test images for this phase. If you're "
-                "bundling images, enter the number of batches (not the number of single images)."
+                "bundling images, enter the number of batches (not the number of single images). "
+                "Enter 0 here if you only have one phase."
             ),
             "average_size_of_test_image_in_mb": (
                 "Average size of test image in MB. If you're "
                 "bundling images, enter the average size of a batch "
                 "(not of one single image)."
             ),
+            "inference_time_limit_in_minutes": (
+                "Average run time per algorithm job in minutes, including "
+                "model loading, i/o, preprocessing and inference."
+            ),
         }
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        if "" in (
+            initial := self.instance.algorithm_selectable_gpu_type_choices
+        ):
+            initial[initial.index("")] = HTMX_BLANK_CHOICE_KEY
+        self.fields["algorithm_selectable_gpu_type_choices"].initial = (
+            initial
+        )
         self.helper = FormHelper(self)
         self.helper.form_id = "budget"
         self.helper.attrs.update(
@@ -635,3 +642,11 @@ def __init__(self, *args, **kwargs):
             }
         )
         self.helper.layout.append(Submit("save", "Save"))
+
+    def clean_algorithm_selectable_gpu_type_choices(self):
+        data = self.cleaned_data.get(
+            "algorithm_selectable_gpu_type_choices", []
+        )
+        if HTMX_BLANK_CHOICE_KEY in data:
+            data[data.index(HTMX_BLANK_CHOICE_KEY)] = ""
+        return data
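Note: the HTMX_BLANK_CHOICE_KEY handling above is presumably needed because
GPUTypeChoices.NO_GPU is stored as the empty string, and an empty checkbox
value does not reliably survive the HTMX form round-trip. The form therefore
swaps "" for a sentinel when setting the initial value and swaps it back in
clean_algorithm_selectable_gpu_type_choices(). A minimal self-contained
sketch of the intended round-trip (the helper names here are illustrative,
not part of the diff):

    HTMX_BLANK_CHOICE_KEY = "__HTMX_BLANK_CHOICE_KEY__"

    def to_widget_values(stored: list[str]) -> list[str]:
        # Outgoing swap: replace "" so the value survives HTMX form
        # serialization.
        return [HTMX_BLANK_CHOICE_KEY if v == "" else v for v in stored]

    def to_stored_values(submitted: list[str]) -> list[str]:
        # Incoming swap: restore "" before it is written to the JSONField.
        return ["" if v == HTMX_BLANK_CHOICE_KEY else v for v in submitted]

    assert to_stored_values(to_widget_values(["", "T4"])) == ["", "T4"]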
" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="average_size_of_test_image_in_mb", + field=models.PositiveIntegerField( + help_text="Average size of a test image in MB.", + validators=[ + django.core.validators.MinValueValidator(limit_value=1), + django.core.validators.MaxValueValidator( + limit_value=10000 + ), + ], + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="budget_for_hosting_challenge", + field=models.PositiveIntegerField( + default=0, + help_text="What is your budget for hosting this challenge? Please be reminded of our challenge pricing policy.", + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="data_license", + field=models.BooleanField(default=True), + ), + migrations.AlterField( + model_name="challengerequest", + name="inference_time_limit_in_minutes", + field=models.PositiveIntegerField( + help_text="Average run time per algorithm job in minutes.", + validators=[ + django.core.validators.MinValueValidator(limit_value=5), + django.core.validators.MaxValueValidator(limit_value=60), + ], + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="long_term_commitment", + field=models.BooleanField(default=True), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_1_number_of_submissions_per_team", + field=models.PositiveIntegerField( + help_text="How many submissions do you expect per team in this phase?" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_1_number_of_test_images", + field=models.PositiveIntegerField( + help_text="Number of test images for this phase." + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_2_number_of_submissions_per_team", + field=models.PositiveIntegerField( + help_text="How many submissions do you expect per team in this phase?" + ), + ), + migrations.AlterField( + model_name="challengerequest", + name="phase_2_number_of_test_images", + field=models.PositiveIntegerField( + help_text="Number of test images for this phase." + ), + ), + ] diff --git a/app/grandchallenge/challenges/models.py b/app/grandchallenge/challenges/models.py index 46e5703b2d..366262fd3b 100644 --- a/app/grandchallenge/challenges/models.py +++ b/app/grandchallenge/challenges/models.py @@ -45,7 +45,11 @@ send_challenge_requested_email_to_reviewers, ) from grandchallenge.challenges.utils import ChallengeTypeChoices -from grandchallenge.components.schemas import GPUTypeChoices +from grandchallenge.components.schemas import ( + SELECTABLE_GPU_TYPES_SCHEMA, + GPUTypeChoices, + get_default_gpu_type_choices, +) from grandchallenge.core.models import UUIDModel from grandchallenge.core.storage import ( get_banner_path, @@ -59,6 +63,7 @@ ) from grandchallenge.core.validators import ( ExtensionValidator, + JSONValidator, MimeTypeValidator, ) from grandchallenge.evaluation.tasks import assign_evaluation_permissions @@ -898,17 +903,29 @@ class ChallengeRequestStatusChoices(models.TextChoices): validators=[MinValueValidator(limit_value=1)], ) inference_time_limit_in_minutes = models.PositiveIntegerField( - blank=True, - null=True, help_text="Average run time per algorithm job in minutes.", validators=[ MinValueValidator(limit_value=5), MaxValueValidator(limit_value=60), ], ) + algorithm_selectable_gpu_type_choices = models.JSONField( + default=get_default_gpu_type_choices, + help_text=( + "The GPU type choices that participants will be able to select for their " + "algorithm inference jobs. 
Options are " + f"{GPUTypeChoices.values}.".replace("'", '"') + ), + validators=[JSONValidator(schema=SELECTABLE_GPU_TYPES_SCHEMA)], + ) + algorithm_maximum_settable_memory_gb = models.PositiveSmallIntegerField( + default=settings.ALGORITHMS_MAX_MEMORY_GB, + help_text=( + "Maximum amount of memory that participants will be allowed to " + "assign to algorithm inference jobs for submission." + ), + ) average_size_of_test_image_in_mb = models.PositiveIntegerField( - null=True, - blank=True, help_text="Average size of a test image in MB.", validators=[ MinValueValidator(limit_value=1), @@ -916,23 +933,15 @@ class ChallengeRequestStatusChoices(models.TextChoices): ], ) phase_1_number_of_submissions_per_team = models.PositiveIntegerField( - null=True, - blank=True, help_text="How many submissions do you expect per team in this phase?", ) phase_2_number_of_submissions_per_team = models.PositiveIntegerField( - null=True, - blank=True, help_text="How many submissions do you expect per team in this phase?", ) phase_1_number_of_test_images = models.PositiveIntegerField( - null=True, - blank=True, help_text="Number of test images for this phase.", ) phase_2_number_of_test_images = models.PositiveIntegerField( - null=True, - blank=True, help_text="Number of test images for this phase.", ) number_of_tasks = models.PositiveIntegerField( @@ -943,21 +952,17 @@ class ChallengeRequestStatusChoices(models.TextChoices): ) budget_for_hosting_challenge = models.PositiveIntegerField( default=0, - null=True, - blank=True, help_text="What is your budget for hosting this challenge? Please be reminded of our challenge pricing policy.", ) long_term_commitment = models.BooleanField( - null=True, - blank=True, + default=True, ) long_term_commitment_extra = models.CharField( max_length=2000, blank=True, ) data_license = models.BooleanField( - null=True, - blank=True, + default=True, ) data_license_extra = models.CharField( max_length=2000, @@ -968,7 +973,6 @@ class ChallengeRequestStatusChoices(models.TextChoices): help_text="If you have any comments, remarks or questions, please leave them here.", ) algorithm_inputs = models.TextField( - blank=True, help_text="What are the inputs to the algorithms submitted as solutions to " "your challenge going to be? " "Please describe in detail " @@ -977,7 +981,6 @@ class ChallengeRequestStatusChoices(models.TextChoices): "supports .mha and .tiff image files and json files for algorithms.", ) algorithm_outputs = models.TextField( - blank=True, help_text="What are the outputs to the algorithms submitted as solutions to " "your challenge going to be? 
" "Please describe in detail what the output(s) " @@ -1049,6 +1052,8 @@ def budget_fields(self): "expected_number_of_teams", "number_of_tasks", "inference_time_limit_in_minutes", + "algorithm_selectable_gpu_type_choices", + "algorithm_maximum_settable_memory_gb", "average_size_of_test_image_in_mb", "phase_1_number_of_submissions_per_team", "phase_1_number_of_test_images", @@ -1147,17 +1152,22 @@ def total_data_and_docker_storage_bytes(self): + self.phase_2_storage_size_bytes ) - @property + @cached_property def compute_euro_cents_per_hour(self): - executor = import_string(settings.COMPONENTS_DEFAULT_BACKEND)( - job_id="", - exec_image_repo_tag="", - # Assume these options picked by the participant - memory_limit=32, - time_limit=self.inference_time_limit_in_minutes, - requires_gpu_type=GPUTypeChoices.T4, + executors = [ + import_string(settings.COMPONENTS_DEFAULT_BACKEND)( + job_id="", + exec_image_repo_tag="", + memory_limit=self.algorithm_maximum_settable_memory_gb, + time_limit=self.inference_time_limit_in_minutes, + requires_gpu_type=gpu_type, + ) + for gpu_type in self.algorithm_selectable_gpu_type_choices + ] + usd_cents_per_hour = max( + executor.usd_cents_per_hour for executor in executors ) - return executor.usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR + return usd_cents_per_hour * settings.COMPONENTS_USD_TO_EUR def get_compute_costs_euros(self, duration): return self.round_to_10_euros( diff --git a/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html b/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html index eacdfd2a19..2cc0e7af36 100644 --- a/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html +++ b/app/grandchallenge/challenges/templates/challenges/partials/budget_table.html @@ -8,7 +8,9 @@