chore: remove limits #841

Closed · wants to merge 3 commits
41 changes: 37 additions & 4 deletions backend/src/v1/complaint/complaint.service.ts
@@ -2073,6 +2073,27 @@ export class ComplaintService {
return outcomeData.getCaseFileByLeadId;
};

+  const _multiFieldCompare = (first: any, second: any, compareInfo: { field: string; sort: string }[]): number => {
+    for (const item of compareInfo) {
+      if (item.sort === "asc") {
+        if (first[item.field] < second[item.field]) {
+          return -1;
+        }
+        if (first[item.field] > second[item.field]) {
+          return 1;
+        }
+      } else if (item.sort === "desc") {
+        if (first[item.field] > second[item.field]) {
+          return -1;
+        }
+        if (first[item.field] < second[item.field]) {
+          return 1;
+        }
+      }
+    }
+    return 0;
+  };

try {
if (complaintType) {
builder = this._generateQueryBuilder(complaintType);
@@ -2196,21 +2217,33 @@
.map((item) => {
return {
name: item.name,
-              date: _applyTimezone(item.date, tz, "datetime"),
+              date: item.date,
               fileType: getFileType(item.name),
             };
-          });
+          })
+          .sort((first, second) =>
+            _multiFieldCompare(first, second, [
+              { field: "fileType", sort: "asc" },
+              { field: "date", sort: "asc" },
+            ]),
+          );
data.hasComplaintAttachments = data.cAtts?.length > 0;

data.oAtts = attachments
.filter((item) => item.type === AttachmentType.OUTCOME_ATTACHMENT)
.map((item) => {
return {
name: item.name,
-              date: _applyTimezone(item.date, tz, "datetime"),
+              date: item.date,
               fileType: getFileType(item.name),
             };
-          });
+          })
+          .sort((first, second) =>
+            _multiFieldCompare(first, second, [
+              { field: "fileType", sort: "asc" },
+              { field: "date", sort: "asc" },
+            ]),
+          );

data.hasOutcomeAttachments = data.oAtts?.length > 0;

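For reference, a minimal sketch of what the new comparator does, reusing the _multiFieldCompare helper added above (the Attachment shape and sample rows are illustrative, not from the PR). The helper walks compareInfo in order, so the first field dominates and later fields only break ties; because the dates are ISO-8601 strings, the plain </> comparisons also sort chronologically:

    type Attachment = { name: string; date: string; fileType: string };

    const atts: Attachment[] = [
      { name: "b.pdf", date: "2024-03-01T10:00:00", fileType: "pdf" },
      { name: "a.jpg", date: "2024-02-01T08:00:00", fileType: "jpg" },
      { name: "a.pdf", date: "2024-01-01T09:00:00", fileType: "pdf" },
    ];

    atts.sort((first, second) =>
      _multiFieldCompare(first, second, [
        { field: "fileType", sort: "asc" }, // primary key: group by file type
        { field: "date", sort: "asc" },     // tie-breaker: oldest first within a type
      ]),
    );
    // => a.jpg (only jpg), then a.pdf and b.pdf (pdfs, ordered by date)

Note the diff also switches date back to the raw item.date instead of _applyTimezone(...), which keeps the field consistently comparable as a timestamp; presumably the timezone formatting now happens wherever the value is rendered.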
6 changes: 0 additions & 6 deletions charts/app/templates/backend/templates/deployment.yaml
@@ -46,9 +46,6 @@ spec:
- name: FLYWAY_LOCATIONS
value: "{{- if eq .Release.Namespace "c1c7ed-dev" -}}{{ .Values.global.secrets.flywayLocations.dev }}{{- else if eq .Release.Namespace "c1c7ed-test" -}}{{ .Values.global.secrets.flywayLocations.test }}{{- else if eq .Release.Namespace "c1c7ed-prod" -}}{{ .Values.global.secrets.flywayLocations.prod }}{{- else -}}filesystem:./flyway/sql{{- end }}"
resources:
-            limits:
-              cpu: 200m
-              memory: 200Mi
requests:
cpu: 50m
memory: 100Mi
@@ -93,9 +90,6 @@ spec:
periodSeconds: 30
timeoutSeconds: 5
resources: # this is optional
-            limits:
-              cpu: 450m
-              memory: 150Mi
requests:
cpu: 50m
memory: 75Mi
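All of the chart changes in this PR follow the same pattern: the limits stanza is deleted while requests are kept. With requests but no limits, the pods fall into the Burstable QoS class and may use spare node capacity beyond their requests (unless a namespace LimitRange injects default limits). Reconstructed from the hunk above, the backend container's resulting block is simply:

    resources:
      requests:
        cpu: 50m
        memory: 100Mi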
3 changes: 0 additions & 3 deletions charts/app/templates/frontend/templates/deployment.yaml
@@ -91,9 +91,6 @@ spec:
periodSeconds: 30
timeoutSeconds: 5
resources:
-            limits:
-              cpu: 100m
-              memory: 150Mi
requests:
cpu: 30m
memory: 50Mi
4 changes: 0 additions & 4 deletions charts/app/templates/webeoc/templates/deployment.yaml
@@ -75,10 +75,6 @@ spec:
periodSeconds: 30
timeoutSeconds: 5
resources: # this is optional
-            limits:
-              ephemeral-storage: "25Mi"
-              cpu: 80m
-              memory: 150Mi
requests:
ephemeral-storage: "15Mi"
cpu: 40m
39 changes: 0 additions & 39 deletions charts/app/values.yaml
@@ -101,9 +101,6 @@ backend:
- prod/api-2
#-- resources specific to vault initContainer. it is optional and is an object.
resources:
-      limits:
-        cpu: 50m
-        memory: 50Mi
requests:
cpu: 50m
memory: 25Mi
@@ -180,16 +177,10 @@ crunchy: # enable it for TEST and PROD, for PR based pipelines simply use single
requests:
cpu: 25m
memory: 256Mi
-      limits:
-        cpu: 100m
-        memory: 512Mi
replicaCertCopy:
requests:
cpu: 1m
memory: 32Mi
-      limits:
-        cpu: 50m
-        memory: 64Mi

pgBackRest:
enabled: false
@@ -210,16 +201,10 @@ crunchy: # enable it for TEST and PROD, for PR based pipelines simply use single
requests:
cpu: 1m
memory: 64Mi
-      limits:
-        cpu: 50m
-        memory: 128Mi
sidecars:
requests:
cpu: 1m
memory: 64Mi
-      limits:
-        cpu: 50m
-        memory: 128Mi

patroni:
postgresql:
@@ -238,9 +223,6 @@ crunchy: # enable it for TEST and PROD, for PR based pipelines simply use single
requests:
cpu: 1m
memory: 64Mi
-      limits:
-        cpu: 50m
-        memory: 128Mi

# Postgres Cluster resource values:
pgmonitor:
@@ -250,9 +232,6 @@ crunchy: # enable it for TEST and PROD, for PR based pipelines simply use single
requests:
cpu: 1m
memory: 64Mi
-      limits:
-        cpu: 50m
-        memory: 128Mi

bitnami-pg:
enabled: true
@@ -294,9 +273,6 @@ bitnami-pg:
requests:
cpu: 100m
memory: 250Mi
-      limits:
-        cpu: 200m
-        memory: 500Mi

backup:
enabled: false # save quota in dev environment, see gha for override
@@ -324,9 +300,6 @@ backup:
failedHistoryLimit: 2 # "The number of failed jobs that will be retained"
backoffLimit: 0 # "The number of attempts to try for a successful job outcome"
resources:
-      limits:
-        cpu: 150m
-        memory: 256Mi
requests:
cpu: 10m
memory: 128Mi
@@ -339,9 +312,6 @@ nats:
requests:
cpu: 100m
memory: 100Mi
-      limits:
-        cpu: 200m
-        memory: 400Mi
jetstream:
enabled: true
fileStore:
@@ -366,9 +336,6 @@ nats:
requests:
cpu: 100m
memory: 100Mi
-      limits:
-        cpu: 200m
-        memory: 400Mi

#-- WebEOC Container
webeoc:
@@ -416,9 +383,6 @@ metabase:
port: 80
targetPort: 3000
resources:
-      limits:
-        cpu: 750m
-        memory: 1250Mi
requests:
cpu: 150m
memory: 500Mi
@@ -472,7 +436,4 @@ metabase-pg:
requests:
cpu: 100m
memory: 250Mi
-      limits:
-        cpu: 200m
-        memory: 500Mi
#-- End Metabase Values
5 changes: 0 additions & 5 deletions database/openshift.deploy.yml
@@ -103,8 +103,6 @@ objects:
requests:
storage: ${DB_PVC_SIZE}
cpu: "50m"
-        limits:
-          cpu: "100m"
storageClassName: netapp-file-standard
- kind: ImageStream
apiVersion: v1
@@ -168,9 +166,6 @@ objects:
requests:
cpu: ${CPU_REQUEST}
memory: ${MEMORY_REQUEST}
-          limits:
-            cpu: ${CPU_LIMIT}
-            memory: ${MEMORY_LIMIT}
readinessProbe:
exec:
command:
56 changes: 56 additions & 0 deletions exports/ceeb_complaint_export.sql
@@ -0,0 +1,56 @@
-----------------------------------------------------
-- Quarterly CEEB Complaint Export query to be run for CEEB statistics
-- see https://github.com/bcgov/nr-compliance-enforcement/wiki/Data-Exports for more information
--
-- Note: some extra fields are commented out of the query below; they are currently part of the
-- NRIS export but are not being used for reporting
-----------------------------------------------------
select
cmp.complaint_identifier as "Record ID",
TO_CHAR(((cmp.incident_reported_utc_timestmp at time zone 'UTC') at time zone 'PDT'), 'MM/DD/YYYY') as "Date Received",
CASE
WHEN cst.short_description = 'Open' THEN 'Incomplete'
WHEN cst.short_description = 'Pending Review' THEN 'Incomplete'
WHEN cst.short_description = 'Closed' THEN 'Complete'
ELSE cst.short_description
END as "Complaint Status",
cmrc.long_description as "Method Received",
--'IDIR\' || ofc.user_id as "Officer Assigned",
gfv.region_name as "Region",
--gfv.offloc_name as "Office",
goc.short_description as "City/Town",
--cmp.caller_name as "Complainant Contact",
--CASE
-- WHEN cmp.is_privacy_requested = 'Y' THEN 'Yes'
-- ELSE 'No'
--END as "Privacy Requested",
--cmp.caller_email as "Email",
--cmp.caller_phone_1 as "Telephone No.",
--cmp.location_summary_text as "Facility/Site Location",
--ST_Y(cmp.location_geometry_point) as "Latitude",
--ST_X(cmp.location_geometry_point) as "Longitude",
ac.suspect_witnesss_dtl_text as "Alleged Contravener"
from
complaint cmp
join
complaint_status_code cst on cst.complaint_status_code = cmp.complaint_status_code
join
geo_organization_unit_code goc on goc.geo_organization_unit_code = cmp.geo_organization_unit_code
join
cos_geo_org_unit_flat_vw gfv on gfv.area_code = goc.geo_organization_unit_code
left join
comp_mthd_recv_cd_agcy_cd_xref cmrcacx on cmrcacx.comp_mthd_recv_cd_agcy_cd_xref_guid = cmp.comp_mthd_recv_cd_agcy_cd_xref_guid
left join
complaint_method_received_code cmrc on cmrc.complaint_method_received_code = cmrcacx.complaint_method_received_code
left join
person_complaint_xref pcx on pcx.complaint_identifier = cmp.complaint_identifier and pcx.active_ind = true
left join
person per on per.person_guid = pcx.person_guid
left join
officer ofc on ofc.person_guid = per.person_guid
right join
allegation_complaint ac on ac.complaint_identifier = cmp.complaint_identifier
where
cmp.incident_reported_utc_timestmp >= CURRENT_DATE - INTERVAL '1 year'
order by
cmp.complaint_identifier asc
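One way to materialize this query as the complaints.csv consumed by the merge script below; the invocation and filename are illustrative, not part of the PR:

    -- From psql, \copy writes the result set to a client-side CSV with a header row:
    -- \copy ( <query above> ) to 'complaints.csv' with (format csv, header)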
34 changes: 34 additions & 0 deletions exports/merge_exports.py
@@ -0,0 +1,34 @@
# This script merges two CSV files based on the 'Record ID' column.
# Setup Instructions:
# 1. Ensure Python is installed and added to your system PATH.
# 2. Add the 'Scripts' directory from the Python installation to the PATH (for pip).
# 3. Install required packages by running: pip install pandas
# 4. Place 'complaints.csv' and 'cases.csv' in the same folder as this script.
# 5. Run the script using: python merge_exports.py

import pandas as pd

def main():
# Define filenames
complaint_file = "complaints.csv"
case_file = "cases.csv"
output_file = "NatCom_Export.csv"
merge_column = "Record ID" # CEEB = "Record ID" COS = "Complaint Identifier"

try:
# Load data from both files
complaint_df = pd.read_csv(complaint_file)
case_df = pd.read_csv(case_file)

# Merge data on 'Record ID' with validation
combined_df = pd.merge(complaint_df, case_df, on=merge_column, how="outer", validate="many_to_many")

# Save the merged data to a new CSV file
combined_df.to_csv(output_file, index=False, encoding='utf-8-sig')
print(f"Data successfully merged into {output_file}")

except FileNotFoundError as e:
print(f"Error: {e}\nPlease ensure both files exist in the correct directory.")

if __name__ == "__main__":
main()
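As a quick illustration of the outer merge, a hypothetical two-row example (the rows are made up; the real inputs are the export CSVs described above):

    import pandas as pd

    complaints = pd.DataFrame({"Record ID": ["C-1", "C-2"], "Status": ["Open", "Closed"]})
    cases = pd.DataFrame({"Record ID": ["C-2", "C-3"], "Outcome": ["Warning", "Charge"]})

    # how="outer" keeps unmatched rows from both sides, filling the gaps with NaN
    merged = pd.merge(complaints, cases, on="Record ID", how="outer", validate="many_to_many")
    print(merged)
    #   Record ID  Status  Outcome
    # 0       C-1    Open      NaN
    # 1       C-2  Closed  Warning
    # 2       C-3     NaN   Charge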