19 changes: 5 additions & 14 deletions .pre-commit-config.yaml
@@ -29,28 +29,19 @@ repos:
- id: pyupgrade
args: [--py310-plus]

- repo: https://github.com/psf/black
rev: 23.3.0
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.13.1
hooks:
- id: black

- repo: https://github.com/PyCQA/isort
rev: 5.12.0
hooks:
- id: isort

- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
- id: ruff
args: [--fix]
- id: ruff-format

- repo: https://github.com/Riverside-Healthcare/djLint
rev: v1.31.1
hooks:
- id: djlint-reformat-django
- id: djlint-django


# sets up .pre-commit-ci.yaml to ensure pre-commit dependencies stay up to date
ci:
autoupdate_schedule: weekly
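
The hook swap above usually travels with Ruff settings in `pyproject.toml`. A minimal sketch consistent with the conventions visible in this PR (119-character lines, `--py310-plus`); the rule selection is an assumption for illustration, not the project's actual configuration:

```toml
[tool.ruff]
line-length = 119          # matches the 119-character limit used by the old black/flake8 args
target-version = "py310"   # matches pyupgrade's --py310-plus hook above

[tool.ruff.lint]
# Roughly the union of the replaced tools: E/F ≈ flake8, I ≈ isort,
# plus the B (bugbear), UP (pyupgrade), and C4 (comprehensions) rules
# whose autofixes appear throughout this diff.
select = ["E", "F", "I", "B", "UP", "C4"]
```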
83 changes: 40 additions & 43 deletions .vscode/settings.json
@@ -1,47 +1,44 @@
{
"editor.formatOnSave": true,
"diffEditor.codeLens": true,
"eslint.enable": true,
"editor.formatOnSave": true,
"diffEditor.codeLens": true,
"eslint.enable": true,
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.sortImports": "explicit",
"source.fixAll.markdownlint": "explicit",
"source.fixAll": "explicit"
},

"typescript.format.enable": true,
"prettier.requireConfig": true,

"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.sortImports": "explicit",
"source.fixAll.markdownlint": "explicit",
"source.fixAll": "explicit"
"source.fixAll": "explicit"
},
"isort.args": [
"--profile",
"black"
],
"typescript.format.enable": true,
"prettier.requireConfig": true,
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.tabSize": 4,
"editor.rulers": [
119
]
},
"black-formatter.args": ["--line-length", "119"],
"flake8.args": [
"--max-line-length", "119"
],
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[javascriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"files.eol": "\n",
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,
"python.analysis.typeCheckingMode": "basic"
"editor.tabSize": 4,
"editor.rulers": [119]
},

"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[javascriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},

"files.eol": "\n",
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,
"python.analysis.typeCheckingMode": "basic"
}
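
On the editor side, the matching Ruff integration for `settings.json` would look roughly like the sketch below, assuming the `charliermarsh.ruff` VS Code extension replaces `ms-python.black-formatter`; the exact block this PR ends up with may differ:

```jsonc
"[python]": {
    "editor.defaultFormatter": "charliermarsh.ruff",
    "editor.codeActionsOnSave": {
        "source.fixAll.ruff": "explicit",
        "source.organizeImports.ruff": "explicit"
    },
    "editor.tabSize": 4,
    "editor.rulers": [119]
}
```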
3 changes: 3 additions & 0 deletions README.md
@@ -252,3 +252,6 @@ The local environment uses a local PostgreSQL database in a Docker container.
### Load fixtures with test data

docker compose run --rm django python manage.py migrate
### Linting

This project uses Ruff for linting and formatting. It replaces Black, isort, and Flake8 with a single, much faster tool.
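
For running the same checks by hand, the standard commands are (a sketch; the pre-commit hooks remain the canonical entry point):

```bash
# everything, exactly as pre-commit runs it
pre-commit run --all-files

# or Ruff directly
ruff check --fix .   # lint, applying safe autofixes
ruff format .        # format (the Black replacement)
```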
2 changes: 1 addition & 1 deletion ami/base/models.py
@@ -153,7 +153,7 @@ def get_project(self):
def __str__(self) -> str:
"""All django models should have this method."""
if hasattr(self, "name"):
name = getattr(self, "name") or "Untitled"
name = self.name or "Untitled"
return f"#{self.pk} {name}"
else:
return f"{self.__class__.__name__} #{self.pk}"
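
The `getattr` change above is Ruff's B009 autofix: `getattr` with a constant attribute name is plain attribute access. Roughly (`obj` is a stand-in):

```python
name = getattr(obj, "name")   # B009: the attribute name is a string literal
name = obj.name               # equivalent, and visible to type checkers

field = "name"
value = getattr(obj, field)   # getattr stays correct when the attribute
                              # name is only known at runtime
```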
4 changes: 3 additions & 1 deletion ami/base/serializers.py
@@ -13,7 +13,9 @@
logger = logging.getLogger(__name__)


def reverse_with_params(viewname: str, args=None, kwargs=None, request=None, params: dict = {}, **extra) -> str:
def reverse_with_params(viewname: str, args=None, kwargs=None, request=None, params: dict = None, **extra) -> str:
if params is None:
params = {}
query_string = urllib.parse.urlencode(params)
base_url = reverse(viewname, request=request, args=args, kwargs=kwargs, **extra)
url = urllib.parse.urlunsplit(("", "", base_url, query_string, ""))
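
The `params` change fixes Ruff's B006 (mutable argument default): a default `{}` is created once, at function definition, and shared by every call that omits the argument. A minimal sketch of the failure mode:

```python
def count_calls(params: dict = {}):  # B006: one dict shared across all calls
    params["n"] = params.get("n", 0) + 1
    return params

count_calls()  # {'n': 1}
count_calls()  # {'n': 2}  <- state leaked from the previous call
```

Strictly, the new signature reads better as `params: dict | None = None`, since `None` is now a legal value for the parameter.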
4 changes: 2 additions & 2 deletions ami/exports/format_types.py
@@ -63,7 +63,7 @@ def export(self):
first = True
f.write("[")
records_exported = 0
for i, batch in enumerate(get_data_in_batches(self.queryset, self.get_serializer_class())):
for _i, batch in enumerate(get_data_in_batches(self.queryset, self.get_serializer_class())):
json_data = json.dumps(batch, cls=DjangoJSONEncoder)
json_data = json_data[1:-1] # remove [ and ] from json string
f.write(",\n" if not first else "")
@@ -153,7 +153,7 @@ def export(self):
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writeheader()

for i, batch in enumerate(get_data_in_batches(self.queryset, self.serializer_class)):
for _i, batch in enumerate(get_data_in_batches(self.queryset, self.serializer_class)):
writer.writerows(batch)
records_exported += len(batch)
self.update_job_progress(records_exported)
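
The `i` → `_i` renames here (and the same fix in `ami/exports/utils.py` below) are Ruff's B007 fix (unused loop control variable). Since the index is never used, the `enumerate()` wrapper could be dropped entirely; the CSV export loop, for example, reduces to:

```python
for batch in get_data_in_batches(self.queryset, self.serializer_class):
    writer.writerows(batch)
    records_exported += len(batch)
    self.update_job_progress(records_exported)
```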
2 changes: 1 addition & 1 deletion ami/exports/utils.py
@@ -80,7 +80,7 @@ def get_data_in_batches(QuerySet: models.QuerySet, Serializer: type[serializers.
batch = []

fake_request = generate_fake_request()
for i, item in enumerate(items):
for _i, item in enumerate(items):
try:
serializer = Serializer(
item,
4 changes: 1 addition & 3 deletions ami/jobs/management/commands/update_stale_jobs.py
@@ -7,9 +7,7 @@


class Command(BaseCommand):
help = (
"Update the status of all jobs that are not in a final state " "and have not been updated in the last X hours."
)
help = "Update the status of all jobs that are not in a final state and have not been updated in the last X hours."

# Add argument for the number of hours to consider a job stale
def add_arguments(self, parser):
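
Merging the two adjacent string literals in `help` clears Ruff's implicit-string-concatenation checks (the ISC rules). Adjacent literals are flagged because they are usually a missing comma in disguise; an illustrative (hypothetical) example:

```python
FINAL_STATES = [
    "SUCCESS",
    "FAILURE"   # <- missing comma: the next literal is silently
    "REVOKED",  #    concatenated, yielding "FAILUREREVOKED"
]
```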
8 changes: 4 additions & 4 deletions ami/jobs/models.py
@@ -412,17 +412,17 @@ def run(cls, job: "Job"):

for i, chunk in enumerate(chunks):
request_sent = time.time()
job.logger.info(f"Processing image batch {i+1} of {len(chunks)}")
job.logger.info(f"Processing image batch {i + 1} of {len(chunks)}")
try:
results = job.pipeline.process_images(
images=chunk,
job_id=job.pk,
project_id=job.project.pk,
)
job.logger.info(f"Processed image batch {i+1} in {time.time() - request_sent:.2f}s")
job.logger.info(f"Processed image batch {i + 1} in {time.time() - request_sent:.2f}s")
except Exception as e:
# Log error about image batch and continue
job.logger.error(f"Failed to process image batch {i+1}: {e}")
job.logger.error(f"Failed to process image batch {i + 1}: {e}")
request_failed_images.extend([img.pk for img in chunk])
else:
total_captures += len(results.source_images)
@@ -433,7 +433,7 @@ def run(cls, job: "Job"):
# @TODO add callback to report errors while saving results marking the job as failed
save_results_task: AsyncResult = job.pipeline.save_results_async(results=results, job_id=job.pk)
save_tasks.append((i + 1, save_results_task))
job.logger.info(f"Saving results for batch {i+1} in sub-task {save_results_task.id}")
job.logger.info(f"Saving results for batch {i + 1} in sub-task {save_results_task.id}")

job.progress.update_stage(
"process",
12 changes: 6 additions & 6 deletions ami/main/api/serializers.py
@@ -1299,12 +1299,12 @@ def get_determination_details(self, obj: Occurrence):
else:
prediction = ClassificationNestedSerializer(obj.best_prediction, context=context).data

return dict(
taxon=taxon,
identification=identification,
prediction=prediction,
score=obj.determination_score,
)
return {
"taxon": taxon,
"identification": identification,
"prediction": prediction,
"score": obj.determination_score,
}


class OccurrenceSerializer(OccurrenceListSerializer):
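
The `dict(...)` → literal rewrite (repeated in `import_taxa.py` below) is Ruff's C408 fix: a dict literal skips the global name lookup and call overhead of `dict()`. In short:

```python
dict(taxon=taxon, score=score)      # C408: unnecessary dict() call
{"taxon": taxon, "score": score}    # equivalent literal
```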
22 changes: 12 additions & 10 deletions ami/main/charts.py
@@ -91,7 +91,7 @@ def captures_per_day(project_pk: int):
)

if captures_per_date:
days, counts = list(zip(*captures_per_date))
days, counts = list(zip(*captures_per_date, strict=False))
days = [day for day in days if day]
# tickvals_per_month = [f"{d:%b}" for d in days]
tickvals = [f"{days[0]:%b %d}", f"{days[-1]:%b %d}"]
@@ -119,7 +119,7 @@ def captures_per_month(project_pk: int):
)

if captures_per_month:
months, counts = list(zip(*captures_per_month))
months, counts = list(zip(*captures_per_month, strict=False))
# tickvals_per_month = [f"{d:%b}" for d in days]
tickvals = [f"{months[0]}", f"{months[-1]}"]
# labels = [f"{d}" for d in months]
@@ -146,7 +146,7 @@ def events_per_week(project_pk: int):
)

if captures_per_week:
weeks, counts = list(zip(*captures_per_week))
weeks, counts = list(zip(*captures_per_week, strict=False))
# tickvals_per_month = [f"{d:%b}" for d in days]
tickvals = [f"{weeks[0]}", f"{weeks[-1]}"]
labels = [f"{d}" for d in weeks]
@@ -172,7 +172,7 @@ def events_per_month(project_pk: int):
)

if captures_per_month:
months, counts = list(zip(*captures_per_month))
months, counts = list(zip(*captures_per_month, strict=False))
# tickvals_per_month = [f"{d:%b}" for d in days]
tickvals = [f"{months[0]}", f"{months[-1]}"]
# labels = [f"{d}" for d in months]
@@ -257,7 +257,7 @@ def occurrences_accumulated(project_pk: int):

occurrences_exist = Occurrence.objects.filter(project=project_pk).exists()
if occurrences_exist:
days, counts = list(zip(*occurrences_per_day))
days, counts = list(zip(*occurrences_per_day, strict=False))
# Accumulate the counts
counts = list(itertools.accumulate(counts))
# tickvals = [f"{d:%b %d}" for d in days]
@@ -288,7 +288,9 @@ def event_detections_per_hour(event_pk: int):
# hours, counts = list(zip(*detections_per_hour))
if detections_per_hour:
hours, counts = list(
zip(*[(d["source_image__timestamp__hour"], d["num_detections"]) for d in detections_per_hour])
zip(
*[(d["source_image__timestamp__hour"], d["num_detections"]) for d in detections_per_hour], strict=False
)
)
hours, counts = shift_to_nighttime(list(hours), list(counts))
# @TODO show a tick for every hour even if there are no detections
@@ -317,7 +319,7 @@ def event_top_taxa(event_pk: int, top_n: int = 10):
)

if top_taxa:
taxa, counts = list(zip(*[(t["name"], t["num_detections"]) for t in reversed(top_taxa)]))
taxa, counts = list(zip(*[(t["name"], t["num_detections"]) for t in reversed(top_taxa)], strict=False))
taxa = [t or "Unknown" for t in taxa]
counts = [c or 0 for c in counts]
else:
@@ -340,7 +342,7 @@ def project_top_taxa(project_pk: int, top_n: int = 10):
)

if top_taxa:
taxa, counts = list(zip(*[(t.name, t.occurrence_count) for t in reversed(top_taxa)]))
taxa, counts = list(zip(*[(t.name, t.occurrence_count) for t in reversed(top_taxa)], strict=False))
else:
taxa, counts = [], []

Expand All @@ -363,7 +365,7 @@ def unique_species_per_month(project_pk: int):
)

# Create a dictionary mapping month numbers to species counts
month_to_count = {month: count for month, count in unique_species_per_month}
month_to_count = dict(unique_species_per_month)

# Create lists for all 12 months, using 0 for months with no data
all_months = list(range(1, 13)) # 1-12 for January-December
@@ -393,7 +395,7 @@ def average_occurrences_per_month(project_pk: int):
)

# Create a dictionary mapping month numbers to occurrence counts
month_to_count = {month: count for month, count in occurrences_per_month}
month_to_count = dict(occurrences_per_month)

# Create lists for all 12 months, using 0 for months with no data
all_months = list(range(1, 13)) # 1-12 for January-December
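
All of the `strict=False` additions in this file come from Ruff's B905 rule: on Python 3.10+, `zip()` should say explicitly whether length mismatches are allowed. The autofix picks `strict=False` to preserve the old truncating behavior; the difference in one sketch:

```python
list(zip([1, 2, 3], ["a", "b"]))                 # B905: truncates silently
list(zip([1, 2, 3], ["a", "b"], strict=False))   # same result, explicit intent
list(zip([1, 2, 3], ["a", "b"], strict=True))    # raises ValueError
```

For the unzipping calls like `zip(*occurrences_per_day, ...)`, the rows are equal-length pairs by construction, so `strict=True` would also be safe there.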
18 changes: 9 additions & 9 deletions ami/main/management/commands/import_taxa.py
@@ -15,7 +15,7 @@

from ...models import TaxaList, Taxon, TaxonRank

RANK_CHOICES = [rank for rank in TaxonRank]
RANK_CHOICES = list(TaxonRank)

logger = logging.getLogger(__name__)
# Set level
@@ -34,7 +34,7 @@

def read_csv(fname: str) -> list[dict]:
reader = csv.DictReader(open(fname))
taxa = [row for row in reader]
taxa = list(reader)
return taxa


@@ -79,7 +79,7 @@ def fix_generic_names(taxon_data: dict) -> dict:
fixed_taxon_data = taxon_data.copy()
generic_names = ["sp.", "sp", "spp", "spp.", "cf.", "cf", "aff.", "aff"]
fallback_name_keys = ["bold_taxon_bin", "inat_taxon_id", "gbif_taxon_key"]
for key, value in taxon_data.items():
for _key, value in taxon_data.items():
if value and value.lower() in generic_names:
# set name to first fallback name that exists
fallback_name = None
@@ -236,7 +236,7 @@ def handle(self, *args, **options):

taxalist, created = TaxaList.objects.get_or_create(name=list_name)
if created:
self.stdout.write(self.style.SUCCESS('Successfully created taxa list "%s"' % taxalist))
self.stdout.write(self.style.SUCCESS(f'Successfully created taxa list "{taxalist}"'))

if options["purge"]:
self.stdout.write(self.style.WARNING("Purging all taxa from the database in 5 seconds..."))
@@ -319,11 +319,11 @@ def create_taxon(self, taxon_data: dict, root_taxon_parent: Taxon) -> tuple[set[
# If the taxon already exists, use it and maybe update it
taxon, created = Taxon.objects.get_or_create(
name=name,
defaults=dict(
rank=rank,
gbif_taxon_key=gbif_taxon_key,
parent=parent_taxon,
),
defaults={
"rank": rank,
"gbif_taxon_key": gbif_taxon_key,
"parent": parent_taxon,
},
)
taxa_in_row.append(taxon)
