Update budget based on gpu type and memory #3731

Merged
merged 30 commits on Dec 6, 2024

Changes from 25 commits

Commits (30)
6552a2e
Create model fields on ChallengeRequest for job requirements options
koopmant Dec 4, 2024
bcc5610
Include new fields on the challenge request form
koopmant Dec 4, 2024
0108732
Calculate max compute costs based on gpu types and memory set.
koopmant Dec 5, 2024
b4939ab
Fix connection with model field
koopmant Dec 5, 2024
5c773d5
Add label
koopmant Dec 5, 2024
7190f39
Add fields to budget fields for display
koopmant Dec 5, 2024
5b99e8d
Add fields to the budget update form
koopmant Dec 5, 2024
49bfa12
Remove comment
koopmant Dec 5, 2024
f5c9f61
Add labels and sync help text of budget update form to request form
koopmant Dec 5, 2024
fa94675
Set field required on model instead of form
koopmant Dec 5, 2024
400b657
Fix tests
koopmant Dec 5, 2024
6157aba
Remove manual validation.
koopmant Dec 5, 2024
c747828
Update migration
koopmant Dec 5, 2024
ad00d5f
Return costs in development backend for different gpu types
koopmant Dec 5, 2024
8eeb733
Test budget calculation with adjusted gpu type selection
koopmant Dec 5, 2024
1b13433
Limit additional choices to g5 instances
koopmant Dec 5, 2024
0db79b4
Make fields non-nullable and update migration
koopmant Dec 5, 2024
291ad12
Revert "Return costs in development backend for different gpu types"
koopmant Dec 5, 2024
d06df81
Use AmazonSageMakerTrainingExecutor for relevant test
koopmant Dec 5, 2024
6087772
Explicitly put original value of setting back after test
koopmant Dec 5, 2024
8422c1a
Fix test
koopmant Dec 5, 2024
f247e30
Remove initial for update form
koopmant Dec 5, 2024
2a5b0fb
Workaround for htmx post serialization issue
koopmant Dec 5, 2024
3381ff5
Cleanup test
koopmant Dec 6, 2024
d1e48be
Fix tests failing after htmx post workaround
koopmant Dec 6, 2024
a570c10
Revert "Explicitly put original value of setting back after test"
koopmant Dec 6, 2024
4a9868e
Replace "no_gpu" with variable
koopmant Dec 6, 2024
db9db17
Get job settings from challenge request in phase conversion form
koopmant Dec 6, 2024
8e6ea2c
Fix fixture usage
jmsmkn Dec 6, 2024
6ca8be0
Remove unnecessary attribute settings in ChallengeRequestFactory
koopmant Dec 6, 2024
132 changes: 72 additions & 60 deletions app/grandchallenge/challenges/forms.py
@@ -16,6 +16,8 @@
from django_select2.forms import Select2MultipleWidget

from grandchallenge.challenges.models import Challenge, ChallengeRequest
from grandchallenge.components.models import GPUTypeChoices
from grandchallenge.components.schemas import get_default_gpu_type_choices
from grandchallenge.core.widgets import MarkdownEditorInlineWidget
from grandchallenge.subdomains.utils import reverse_lazy

@@ -108,47 +110,6 @@ def clean(self):
return cleaned_data


class ChallengeRequestBudgetFieldValidationMixin:
def clean(self):
cleaned_data = super().clean()
if (
"average_size_of_test_image_in_mb" not in cleaned_data.keys()
or not cleaned_data["average_size_of_test_image_in_mb"]
):
raise ValidationError(
"Please provide the average test image size."
)
if (
"inference_time_limit_in_minutes" not in cleaned_data.keys()
or not cleaned_data["inference_time_limit_in_minutes"]
):
raise ValidationError("Please provide an inference time limit.")
if (
"phase_1_number_of_submissions_per_team" not in cleaned_data.keys()
or "phase_2_number_of_submissions_per_team"
not in cleaned_data.keys()
or cleaned_data["phase_1_number_of_submissions_per_team"] is None
or cleaned_data["phase_2_number_of_submissions_per_team"] is None
):
raise ValidationError(
"Please provide the number of "
"submissions per team for each phase. Enter 0 for phase 2 "
"if you only have 1 phase."
)
if (
"phase_1_number_of_test_images" not in cleaned_data.keys()
or "phase_2_number_of_test_images" not in cleaned_data.keys()
or cleaned_data["phase_1_number_of_test_images"] is None
or cleaned_data["phase_2_number_of_test_images"] is None
):
raise ValidationError(
"Please provide the number of "
"test images for each phase. Enter 0 for phase 2 if you "
"only have 1 phase."
)
return cleaned_data


general_information_items_1 = (
"title",
"short_name",
@@ -189,9 +150,23 @@ def clean(self):
)


class ChallengeRequestForm(
ChallengeRequestBudgetFieldValidationMixin, forms.ModelForm
):
class ChallengeRequestForm(forms.ModelForm):
algorithm_selectable_gpu_type_choices = forms.MultipleChoiceField(
initial=get_default_gpu_type_choices(),
choices=[
(choice.value, choice.label)
for choice in [
GPUTypeChoices.NO_GPU,
GPUTypeChoices.T4,
GPUTypeChoices.A10G,
]
],
widget=forms.CheckboxSelectMultiple,
label="Selectable GPU types for algorithm jobs",
help_text="The GPU type choices that participants will be able to select for "
"their algorithm inference jobs.",
)

class Meta:
model = ChallengeRequest
fields = (
@@ -203,6 +178,8 @@ class Meta:
"number_of_tasks",
"average_size_of_test_image_in_mb",
"inference_time_limit_in_minutes",
"algorithm_selectable_gpu_type_choices",
"algorithm_maximum_settable_memory_gb",
"algorithm_inputs",
"algorithm_outputs",
*phase_1_items,
@@ -260,6 +237,7 @@ class Meta:
"phase_2_number_of_submissions_per_team": "Expected number of submissions per team to Phase 2",
"budget_for_hosting_challenge": "Budget for hosting challenge in Euros",
"inference_time_limit_in_minutes": "Average algorithm job run time in minutes",
"algorithm_maximum_settable_memory_gb": "Maximum memory for algorithm jobs in GB",
"structured_challenge_submission_doi": "DOI",
"structured_challenge_submission_form": "PDF",
"challenge_fee_agreement": format_html(
@@ -361,7 +339,8 @@ class Meta:
"phase_2_number_of_test_images": (
"Number of test images for this phase. If you're <a href="
"'https://grand-challenge.org/documentation/create-your-own-challenge/#budget-batched-images'>"
"bundling images</a>, enter the number of batches (not the number of single images)."
"bundling images</a>, enter the number of batches (not the number of single images). "
"Enter 0 here if you only have one phase."
),
"average_size_of_test_image_in_mb": (
"Average size of test image in MB. If you're <a href="
@@ -376,7 +355,7 @@ class Meta:
"phase_2_number_of_submissions_per_team": (
"How many submissions do you expect per team to this phase? "
"You can enforce a submission limit in the settings for each phase "
"to control this."
"to control this. Enter 0 here if you only have one phase."
),
"submission_assessment": (
f"{structured_challenge_submission_help_text} Otherwise, "
Expand All @@ -395,17 +374,6 @@ def __init__(self, creator, *args, **kwargs):
self.instance.creator = creator
self.fields["title"].required = True
self.fields["challenge_fee_agreement"].required = True
self.fields["algorithm_inputs"].required = True
self.fields["algorithm_outputs"].required = True
self.fields["number_of_tasks"].required = True
self.fields["average_size_of_test_image_in_mb"].required = True
self.fields["inference_time_limit_in_minutes"].required = True
self.fields["phase_1_number_of_submissions_per_team"].required = True
self.fields["phase_2_number_of_submissions_per_team"].required = True
self.fields["phase_1_number_of_test_images"].required = True
self.fields["phase_2_number_of_test_images"].required = True
self.fields["data_license"].initial = True
self.fields["long_term_commitment"].initial = True
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
@@ -490,6 +458,8 @@ def __init__(self, creator, *args, **kwargs):
"number_of_tasks",
"average_size_of_test_image_in_mb",
"inference_time_limit_in_minutes",
"algorithm_selectable_gpu_type_choices",
"algorithm_maximum_settable_memory_gb",
HTML(
format_html(
(
@@ -604,24 +574,58 @@ def clean_status(self):
return status


class ChallengeRequestBudgetUpdateForm(
ChallengeRequestBudgetFieldValidationMixin, forms.ModelForm
):
class ChallengeRequestBudgetUpdateForm(forms.ModelForm):
algorithm_selectable_gpu_type_choices = forms.MultipleChoiceField(
# replace NO_GPU value ("") in choices to avoid HTMX POST issue
choices=[
("no_gpu", GPUTypeChoices.NO_GPU.label),
(GPUTypeChoices.T4, GPUTypeChoices.T4.label),
(GPUTypeChoices.A10G, GPUTypeChoices.A10G.label),
],
widget=forms.CheckboxSelectMultiple,
label="Selectable GPU types for algorithm jobs",
help_text="The GPU type choices that participants will be able to select for "
"their algorithm inference jobs.",
)

class Meta:
model = ChallengeRequest
fields = (
"expected_number_of_teams",
"number_of_tasks",
"inference_time_limit_in_minutes",
"algorithm_selectable_gpu_type_choices",
"algorithm_maximum_settable_memory_gb",
"average_size_of_test_image_in_mb",
"phase_1_number_of_submissions_per_team",
"phase_1_number_of_test_images",
"phase_2_number_of_submissions_per_team",
"phase_2_number_of_test_images",
)
labels = {
"phase_1_number_of_submissions_per_team": "Expected number of submissions per team to Phase 1",
"phase_2_number_of_submissions_per_team": "Expected number of submissions per team to Phase 2",
"inference_time_limit_in_minutes": "Average algorithm job run time in minutes",
"algorithm_maximum_settable_memory_gb": "Maximum memory for algorithm jobs in GB",
}
help_texts = {
"inference_time_limit_in_minutes": (
"The average time that you expect an algorithm job to take in minutes. "
"This time estimate should account for everything that needs to happen "
"for an algorithm container to process <u>one single image, including "
"model loading, i/o, preprocessing and inference.</u>"
),
}

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "" in (
initial := self.instance.algorithm_selectable_gpu_type_choices
):
initial[initial.index("")] = "no_gpu" # to avoid HTMX POST issue
self.fields["algorithm_selectable_gpu_type_choices"].initial = (
initial
)
self.helper = FormHelper(self)
self.helper.form_id = "budget"
self.helper.attrs.update(
@@ -635,3 +639,11 @@
}
)
self.helper.layout.append(Submit("save", "Save"))

def clean_algorithm_selectable_gpu_type_choices(self):
data = self.cleaned_data.get(
"algorithm_selectable_gpu_type_choices", []
)
if "no_gpu" in data: # to avoid HTMX POST issue
data[data.index("no_gpu")] = ""
return data
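
For reference, the workaround above is a symmetric mapping: the empty-string value of GPUTypeChoices.NO_GPU is swapped for a "no_gpu" sentinel before the form is rendered (in __init__) and swapped back after the htmx POST (in clean_algorithm_selectable_gpu_type_choices). A minimal sketch of that round trip, with hypothetical helper names (the form itself inlines the strings directly):

# Hypothetical helpers illustrating the sentinel round trip used above.
NO_GPU_DB_VALUE = ""          # value stored on the model (GPUTypeChoices.NO_GPU)
NO_GPU_FORM_VALUE = "no_gpu"  # sentinel that survives the htmx POST

def to_form_values(db_values):
    # Replace the empty-string choice with the sentinel before rendering.
    return [NO_GPU_FORM_VALUE if v == NO_GPU_DB_VALUE else v for v in db_values]

def to_db_values(form_values):
    # Map the sentinel back to the empty string after the POST.
    return [NO_GPU_DB_VALUE if v == NO_GPU_FORM_VALUE else v for v in form_values]

assert to_db_values(to_form_values(["", "T4"])) == ["", "T4"]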
137 changes: 137 additions & 0 deletions (new migration in app/grandchallenge/challenges/migrations)
@@ -0,0 +1,137 @@
# Generated by Django 4.2.16 on 2024-12-05 13:14

import django.core.validators
from django.db import migrations, models

import grandchallenge.components.schemas
import grandchallenge.core.validators


class Migration(migrations.Migration):
dependencies = [
("challenges", "0044_remove_challenge_registration_page_text"),
]

operations = [
migrations.AddField(
model_name="challengerequest",
name="algorithm_maximum_settable_memory_gb",
field=models.PositiveSmallIntegerField(
default=32,
help_text="Maximum amount of memory that participants will be allowed to assign to algorithm inference jobs for submission.",
),
),
migrations.AddField(
model_name="challengerequest",
name="algorithm_selectable_gpu_type_choices",
field=models.JSONField(
default=grandchallenge.components.schemas.get_default_gpu_type_choices,
help_text='The GPU type choices that participants will be able to select for their algorithm inference jobs. Options are ["", "A100", "A10G", "V100", "K80", "T4"].',
validators=[
grandchallenge.core.validators.JSONValidator(
schema={
"$schema": "http://json-schema.org/draft-07/schema",
"items": {
"enum": [
"",
"A100",
"A10G",
"V100",
"K80",
"T4",
],
"type": "string",
},
"title": "The Selectable GPU Types Schema",
"type": "array",
"uniqueItems": True,
}
)
],
),
),
migrations.AlterField(
model_name="challengerequest",
name="algorithm_inputs",
field=models.TextField(
help_text="What are the inputs to the algorithms submitted as solutions to your challenge going to be? Please describe in detail what the input(s) reflect(s), for example, MRI scan of the brain, or chest X-ray. Grand Challenge only supports .mha and .tiff image files and json files for algorithms."
),
),
migrations.AlterField(
model_name="challengerequest",
name="algorithm_outputs",
field=models.TextField(
help_text="What are the outputs to the algorithms submitted as solutions to your challenge going to be? Please describe in detail what the output(s) reflect(s), for example, probability of a positive PCR result, or stroke lesion segmentation. "
),
),
migrations.AlterField(
model_name="challengerequest",
name="average_size_of_test_image_in_mb",
field=models.PositiveIntegerField(
help_text="Average size of a test image in MB.",
validators=[
django.core.validators.MinValueValidator(limit_value=1),
django.core.validators.MaxValueValidator(
limit_value=10000
),
],
),
),
migrations.AlterField(
model_name="challengerequest",
name="budget_for_hosting_challenge",
field=models.PositiveIntegerField(
default=0,
help_text="What is your budget for hosting this challenge? Please be reminded of our <a href='/challenge-policy-and-pricing/'>challenge pricing policy</a>.",
),
),
migrations.AlterField(
model_name="challengerequest",
name="data_license",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="challengerequest",
name="inference_time_limit_in_minutes",
field=models.PositiveIntegerField(
help_text="Average run time per algorithm job in minutes.",
validators=[
django.core.validators.MinValueValidator(limit_value=5),
django.core.validators.MaxValueValidator(limit_value=60),
],
),
),
migrations.AlterField(
model_name="challengerequest",
name="long_term_commitment",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="challengerequest",
name="phase_1_number_of_submissions_per_team",
field=models.PositiveIntegerField(
help_text="How many submissions do you expect per team in this phase?"
),
),
migrations.AlterField(
model_name="challengerequest",
name="phase_1_number_of_test_images",
field=models.PositiveIntegerField(
help_text="Number of test images for this phase."
),
),
migrations.AlterField(
model_name="challengerequest",
name="phase_2_number_of_submissions_per_team",
field=models.PositiveIntegerField(
help_text="How many submissions do you expect per team in this phase?"
),
),
migrations.AlterField(
model_name="challengerequest",
name="phase_2_number_of_test_images",
field=models.PositiveIntegerField(
help_text="Number of test images for this phase."
),
),
]
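
The JSONValidator above rejects anything other than a list of unique strings drawn from the six GPU type values. A standalone sketch (not project code) of the same draft-07 check, using the jsonschema package:

# Standalone sketch: the migration's schema checked with the jsonschema package.
from jsonschema import ValidationError, validate

SELECTABLE_GPU_TYPES_SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema",
    "title": "The Selectable GPU Types Schema",
    "type": "array",
    "uniqueItems": True,
    "items": {"type": "string", "enum": ["", "A100", "A10G", "V100", "K80", "T4"]},
}

validate(["", "T4", "A10G"], SELECTABLE_GPU_TYPES_SCHEMA)  # passes

try:
    validate(["T4", "T4"], SELECTABLE_GPU_TYPES_SCHEMA)  # duplicate entries
except ValidationError as err:
    print(err.message)  # rejected by uniqueItems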