Commit
Fix line length (partial #5358)
nadove-ucsc committed Sep 6, 2023
1 parent 5ceee59 commit 5746788
Showing 82 changed files with 1,087 additions and 430 deletions.
31 changes: 18 additions & 13 deletions .flake8/azul_flake8.py
@@ -253,13 +253,13 @@ def check_statement_order(self, node: EitherImport) -> None:
if module_name not in self.expected_resolution_failures:
self._error(node, ImportErrors.unresolvable)
else:
# The order in which NodeVisitor traverses the syntax tree is unspecified
# so we can't be sure which nodes have already been visited.
# To prevent a single out-of-order import from causing errors on every
# other line, we only compare each import statement with one other statement.
# To prevent the same line from being reported twice, the error is always
# reported for the node we're currently visiting, regardless of whether it
# comes first or second.
# The order in which NodeVisitor traverses the syntax tree is
# unspecified so we can't be sure which nodes have already been
# visited. To prevent a single out-of-order import from causing
# errors on every other line, we only compare each import statement
# with one other statement. To prevent the same line from being
# reported twice, the error is always reported for the node we're
# currently visiting, regardless of whether it comes first or second.
pred = self._visited_predecessor(node)
succ = self._visited_successor(node)
if pred is not None and not self._is_correct_order(pred, ordered_import):
@@ -339,14 +339,19 @@ def _visited_predecessor(self, node: EitherImport) -> Optional[OrderedImport]:
Scan the list of previously visited nodes for the node with the highest
line number that is less than the provided node's line number.
"""
return max(filter(lambda t: t.node.lineno < node.lineno, self.visited_order_info),
key=lambda t: t.node.lineno,
default=None)
return max(
filter(lambda t: t.node.lineno < node.lineno, self.visited_order_info),
key=lambda t: t.node.lineno,
default=None
)

def _filtered_tokens(self, linenno):
return [tokeninfo
for tokeninfo in self.line_tokens[linenno] # 1-based indexing for source code lines
if tokeninfo.type not in (tokenize.COMMENT, tokenize.NEWLINE, tokenize.NL, tokenize.ENDMARKER)]
return [
tokeninfo
# 1-based indexing for source code lines
for tokeninfo in self.line_tokens[linenno]
if tokeninfo.type not in (tokenize.COMMENT, tokenize.NEWLINE, tokenize.NL, tokenize.ENDMARKER)
]


class AzulImports:
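
For context, a minimal, self-contained sketch of the predecessor lookup that _visited_predecessor performs in the hunk above, using hypothetical tuples in place of the checker's visited-node records (none of these names exist in the repository):

from typing import NamedTuple, Optional

class Visited(NamedTuple):
    lineno: int
    module: str

# Hypothetical record of already-visited import statements, in arbitrary order
visited = [Visited(12, 'os'), Visited(3, 'sys'), Visited(27, 'json')]

def visited_predecessor(lineno: int) -> Optional[Visited]:
    # Highest line number strictly below the given one, or None if there is none
    return max((v for v in visited if v.lineno < lineno),
               key=lambda v: v.lineno,
               default=None)

assert visited_predecessor(20) == Visited(12, 'os')
assert visited_predecessor(1) is None
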
3 changes: 2 additions & 1 deletion docs/hca_file_downloader.py
@@ -255,7 +255,8 @@ def download_file(self, url: str, output_path: str) -> None:
"""
Download a file from the given URL.
"""
url = url.replace('/fetch', '') # Work around https://github.com/DataBiosphere/azul/issues/2908
# Work around https://github.com/DataBiosphere/azul/issues/2908
url = url.replace('/fetch', '')
response = requests.get(url, stream=True)
response.raise_for_status()
try:
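
The body of the try block in download_file is collapsed above. A plausible way a streamed download like this finishes; the chunk size and file handling here are assumptions, not the repository's actual code:

import requests

def download_file(url: str, output_path: str) -> None:
    # Work around https://github.com/DataBiosphere/azul/issues/2908
    url = url.replace('/fetch', '')
    response = requests.get(url, stream=True)
    response.raise_for_status()
    # Stream the body to disk in chunks so large files never need to fit in memory
    with open(output_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=1024 * 1024):
            f.write(chunk)
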
6 changes: 5 additions & 1 deletion scripts/cgm_adapter.py
@@ -440,7 +440,11 @@ def process_project(self, project: Mapping[str, Any]) -> bool:
self.rows_completed.extend(rows_processed)
return True

def get_blob(self, project_uuid: str, shortname: str, file_name: str) -> gcs.Blob:
def get_blob(self,
project_uuid: str,
shortname: str,
file_name: str
) -> gcs.Blob:
"""
Return the blob from the source bucket.
"""
5 changes: 4 additions & 1 deletion scripts/envhook.py
@@ -219,7 +219,10 @@ def share_aws_cli_credential_cache(self):
NV = TypeVar('NV')


def zip_dict(old: Mapping[K, OV], new: Mapping[K, NV], missing=None) -> dict[K, tuple[OV, NV]]:
def zip_dict(old: Mapping[K, OV],
new: Mapping[K, NV],
missing=None
) -> dict[K, tuple[OV, NV]]:
"""
Merge two dictionaries. The resulting dictionary contains an entry for every
key in either `old` or `new`. Each entry in the result associates a key to
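
The rest of zip_dict's docstring and its body are collapsed above. A minimal sketch of the behaviour implied by the signature, in which each key maps to an (old value, new value) pair and `missing` fills the side a key is absent from. This is an inference from the signature, not the repository's implementation:

from typing import Mapping, TypeVar

K = TypeVar('K')
OV = TypeVar('OV')
NV = TypeVar('NV')

def zip_dict(old: Mapping[K, OV],
             new: Mapping[K, NV],
             missing=None
             ) -> dict[K, tuple[OV, NV]]:
    # Union of keys, pairing each key's old and new values, padded with `missing`
    return {
        k: (old.get(k, missing), new.get(k, missing))
        for k in old.keys() | new.keys()
    }

assert zip_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) == {
    'a': (1, None),
    'b': (2, 3),
    'c': (None, 4)
}
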
11 changes: 8 additions & 3 deletions scripts/manage_requirements.py
@@ -72,7 +72,8 @@ def create(cls, req: Requirement) -> Optional['PinnedRequirement']:
assert op == '=='
return cls(name=req.name.lower(), versions=Versions(version))
elif req.vcs:
reject(req.revision is None, 'VCS requirements must carry a specific revision', req)
reject(req.revision is None,
'VCS requirements must carry a specific revision', req)
return cls(name=req.name.lower())
elif req.recursive:
return None
@@ -191,7 +192,8 @@ def run(self):
direct_runtime_reqs = self.get_direct_reqs(self.runtime)
direct_build_reqs = self.get_direct_reqs(self.build)
dupes = direct_build_reqs & direct_runtime_reqs
require(not dupes, 'Some requirements are declared as both run and build time', dupes)
require(not dupes,
'Some requirements are declared as both run and build time', dupes)

all_reqs = self.get_reqs(self.build)
build_reqs = all_reqs - pip_reqs
@@ -265,7 +267,10 @@ def get_direct_reqs(self, qualifier: Qualifier) -> PinnedRequirements:
with open(path) as f:
return self.parse_reqs(f)

def write_transitive_reqs(self, reqs: PinnedRequirements, qualifier: Qualifier) -> None:
def write_transitive_reqs(self,
reqs: PinnedRequirements,
qualifier: Qualifier
) -> None:
self.write_reqs(reqs,
file_name=f'requirements{qualifier.extension}.trans.txt',
type='transitive')
6 changes: 5 additions & 1 deletion scripts/manifest_404.py
@@ -74,7 +74,11 @@ def filter_projects():
url = config.service_endpoint.set(path='/index/projects')
resp = requests.get(url=str(url), params=dict(size=1000))
projects = resp.json()['termFacets']['project']['terms']
return [one(project['projectId']) for project in projects if project['term'] != 'Tabula Muris']
return [
one(project['projectId'])
for project in projects
if project['term'] != 'Tabula Muris'
]


project_ids = filter_projects()
12 changes: 8 additions & 4 deletions scripts/provision_credentials.py
@@ -71,7 +71,8 @@ def provision_google(self, build, email, secret_name):

@classmethod
def _random_hmac_key(cls):
# Even though an HMAC key can be any sequence of bytes, we restrict to base64 in order to encode as string
# Even though an HMAC key can be any sequence of bytes, we restrict to
# base64 in order to encode as string
key = base64.encodebytes(os.urandom(48)).decode().replace('=', '').replace('\n', '')
assert len(key) == 64
return json.dumps({'key': key, 'key_id': str(uuid.uuid4())})
@@ -106,7 +107,8 @@ def _create_service_account_creds(self, service_account_email):
key = iam.projects().serviceAccounts().keys().create(
name='projects/-/serviceAccounts/' + service_account_email, body={}
).execute()
logger.info("Successfully created service account key for user '%s'", service_account_email)
logger.info("Successfully created service account key for user '%s'",
service_account_email)
return parse_google_key(key)

def _destroy_aws_secrets_manager_secret(self, secret_name):
@@ -116,7 +118,8 @@ def _destroy_aws_secrets_manager_secret(self, secret_name):
ForceDeleteWithoutRecovery=True
)
except self.secrets_manager.exceptions.ResourceNotFoundException:
logger.info('AWS secret %s does not exist. No changes will be made.', secret_name)
logger.info('AWS secret %s does not exist. No changes will be made.',
secret_name)
else:
assert response['Name'] == secret_name
# AWS docs recommend waiting for ResourceNotFoundException:
@@ -144,7 +147,8 @@ def _destroy_service_account_creds(self, service_account_email, secret_name):
SecretId=config.secrets_manager_secret_name(secret_name)
)
except self.secrets_manager.exceptions.ResourceNotFoundException:
logger.info('Secret already deleted, cannot get key_id for %s', service_account_email)
logger.info('Secret already deleted, cannot get key_id for %s',
service_account_email)
return
else:
key_id = json.loads(creds['SecretString'])['private_key_id']
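
A side note on the _random_hmac_key hunk above: the assert len(key) == 64 holds because 48 random bytes encode to exactly 64 base64 characters with no '=' padding (48 is divisible by 3), and encodebytes only adds a trailing newline, which is stripped. A quick stand-alone check:

import base64
import os

raw = os.urandom(48)
key = base64.encodebytes(raw).decode().replace('=', '').replace('\n', '')
# 48 bytes -> 48 / 3 * 4 = 64 base64 characters, no padding required
assert len(key) == 64
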
3 changes: 2 additions & 1 deletion scripts/rename_resources.py
@@ -56,7 +56,8 @@ def main(argv: list[str]):
else:
if new_name is None:
if args.dry_run:
log.info('Found %r, would be removing it from the Terraform state', current_name)
log.info('Found %r, would be removing it from the Terraform state',
current_name)
else:
log.info('Found %r, removing it from the Terraform state', current_name)
terraform.run('state', 'rm', current_name)
16 changes: 13 additions & 3 deletions scripts/scrape_aws_service_model.py
@@ -23,13 +23,20 @@

program_name, _ = os.path.splitext(os.path.basename(__file__))

output_file_path = os.path.join(config.project_root, 'terraform', 'gitlab', 'aws_service_model.json.gz')
output_file_path = os.path.join(config.project_root,
'terraform',
'gitlab',
'aws_service_model.json.gz')


@lru_cache(maxsize=1000)
def get(url_path):
assert url_path.startswith('/')
cache_file_path = os.path.join(config.project_root, '.cache', program_name, url_path[1:], 'cache.json')
cache_file_path = os.path.join(config.project_root,
'.cache',
program_name,
url_path[1:],
'cache.json')
if os.path.exists(cache_file_path):
with open(cache_file_path, 'r', encoding='utf-8') as f:
return json.load(f)
@@ -41,7 +48,10 @@ def get(url_path):
response_json = response.json()
cache_dir_path = os.path.dirname(cache_file_path)
os.makedirs(cache_dir_path, exist_ok=True)
f = tempfile.NamedTemporaryFile(mode='w+', dir=cache_dir_path, encoding='utf-8', delete=False)
f = tempfile.NamedTemporaryFile(mode='w+',
dir=cache_dir_path,
encoding='utf-8',
delete=False)
try:
json.dump(response_json, f, indent=4)
except BaseException:
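
The tail of get() is collapsed above. It appears to follow the write-to-temp-file-then-rename idiom for populating the cache; a sketch of how that idiom is typically completed, under that assumption rather than from the script's actual code:

import json
import os
import tempfile

def write_json_atomically(path: str, data) -> None:
    # Write to a temporary file in the target directory, then move it into
    # place so readers never observe a partially written cache file.
    dir_path = os.path.dirname(path)
    os.makedirs(dir_path, exist_ok=True)
    f = tempfile.NamedTemporaryFile(mode='w+',
                                    dir=dir_path,
                                    encoding='utf-8',
                                    delete=False)
    try:
        json.dump(data, f, indent=4)
    except BaseException:
        f.close()
        os.unlink(f.name)
        raise
    else:
        f.close()
        os.rename(f.name, path)
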
26 changes: 19 additions & 7 deletions src/azul/__init__.py
@@ -342,7 +342,10 @@ def dss_deployment_stage(self, dss_endpoint: str) -> str:
def dss_direct_access(self) -> bool:
return self._boolean(self.environ['AZUL_DSS_DIRECT_ACCESS'])

def dss_direct_access_role(self, lambda_name: str, stage: Optional[str] = None) -> Optional[str]:
def dss_direct_access_role(self,
lambda_name: str,
stage: Optional[str] = None
) -> Optional[str]:
key = 'AZUL_DSS_DIRECT_ACCESS_ROLE'
try:
role_arn = self.environ[key]
@@ -909,7 +912,10 @@ def is_hca_enabled(self, catalog: Optional[str] = None) -> bool:
def is_anvil_enabled(self, catalog: Optional[str] = None) -> bool:
return self._is_plugin_enabled('anvil', catalog)

def _is_plugin_enabled(self, plugin_prefix: str, catalog: Optional[str]) -> bool:
def _is_plugin_enabled(self,
plugin_prefix: str,
catalog: Optional[str]
) -> bool:
def predicate(catalog):
return any(
plugin.name.split('_')[0] == plugin_prefix
@@ -929,7 +935,11 @@ def integration_test_catalogs(self) -> Mapping[CatalogName, Catalog]:
if catalog.is_integration_test_catalog
}

def es_index_name(self, catalog: CatalogName, entity_type: str, aggregate: bool) -> str:
def es_index_name(self,
catalog: CatalogName,
entity_type: str,
aggregate: bool
) -> str:
return str(IndexName(prefix=self._index_prefix,
version=2,
deployment=self.deployment_stage,
@@ -1032,7 +1042,8 @@ class BrowserSite(TypedDict):
real_path: str

@property
def browser_sites(self) -> Mapping[str, Mapping[str, Mapping[str, BrowserSite]]]:
def browser_sites(self
) -> Mapping[str, Mapping[str, Mapping[str, BrowserSite]]]:
import json
return json.loads(self.environ['azul_browser_sites'])

@@ -1148,9 +1159,10 @@ def aggregation_lambda_timeout(self, *, retry: bool) -> int:

api_gateway_timeout = 29

# The number of seconds to extend the timeout of a Lambda fronted by API Gateway so that API Gateway times out
# before the Lambda. We pad the Lambda timeout so we get consistent behaviour. Without this padding we'd have a
# race between the Lambda being killed and API Gateway timing out.
# The number of seconds to extend the timeout of a Lambda fronted by
# API Gateway so that API Gateway times out before the Lambda. We pad the
# Lambda timeout so we get consistent behaviour. Without this padding we'd
# have a race between the Lambda being killed and API Gateway timing out.
#
api_gateway_timeout_padding = 2

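
To make the comment above concrete: a Lambda fronted by API Gateway would presumably be configured with api_gateway_timeout + api_gateway_timeout_padding seconds, so the 29-second gateway timeout always fires before the Lambda is killed. A trivial illustration (how the constants are combined is not shown in this diff):

api_gateway_timeout = 29
api_gateway_timeout_padding = 2

# Give the Lambda slightly more time than the gateway so a slow request yields
# API Gateway's 504 response instead of an abruptly terminated Lambda
lambda_timeout = api_gateway_timeout + api_gateway_timeout_padding
assert lambda_timeout == 31
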
15 changes: 12 additions & 3 deletions src/azul/azulclient.py
@@ -131,15 +131,20 @@ def reindex(self, catalog: CatalogName, prefix: str) -> int:
self.index(catalog, notifications)
return len(notifications)

def index(self, catalog: CatalogName, notifications: Iterable[JSON], delete: bool = False):
def index(self,
catalog: CatalogName,
notifications: Iterable[JSON],
delete: bool = False
):
errors = defaultdict(int)
missing = []
indexed = 0
total = 0
path = (catalog, 'delete' if delete else 'add')
indexer_url = config.indexer_endpoint.set(path=path)

with ThreadPoolExecutor(max_workers=self.num_workers, thread_name_prefix='pool') as tpe:
with ThreadPoolExecutor(max_workers=self.num_workers,
thread_name_prefix='pool') as tpe:

def attempt(notification, i):
log_args = (indexer_url, notification, i)
@@ -181,7 +186,11 @@ def handle_future(future):
for future in futures:
handle_future(future)

printer = PrettyPrinter(stream=None, indent=1, width=80, depth=None, compact=False)
printer = PrettyPrinter(stream=None,
indent=1,
width=80,
depth=None,
compact=False)
logger.info("Sent notifications for %i of %i bundles for catalog %r.",
indexed, total, catalog)
if errors:
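
The attempt and handle_future helpers referenced in this hunk are collapsed. A generic sketch of the fan-out pattern index() uses, submitting one task per notification to the pool and then tallying successes and failures, with a placeholder post_notification standing in for the real HTTP call:

from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor

def post_notification(notification) -> None:
    ...  # placeholder for the actual request to the indexer endpoint

def index_all(notifications, num_workers: int = 8) -> dict:
    errors = defaultdict(int)
    indexed = 0
    with ThreadPoolExecutor(max_workers=num_workers,
                            thread_name_prefix='pool') as tpe:
        futures = [tpe.submit(post_notification, n) for n in notifications]
        for future in futures:
            e = future.exception()
            if e is None:
                indexed += 1
            else:
                errors[type(e).__name__] += 1
    return {'indexed': indexed, 'errors': dict(errors)}
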
3 changes: 2 additions & 1 deletion src/azul/chalice.py
@@ -463,7 +463,8 @@ def _controller(self, controller_cls: Type[C], **kwargs) -> C:
return controller_cls(app=self, **kwargs)

def swagger_ui(self) -> Response:
swagger_ui_template = self.load_static_resource('swagger', 'swagger-ui.html.template.mustache')
swagger_ui_template = self.load_static_resource('swagger',
'swagger-ui.html.template.mustache')
base_url = self.base_url
redirect_url = furl(base_url).add(path='oauth2_redirect')
deployment_url = furl(base_url).add(path='openapi')
10 changes: 6 additions & 4 deletions src/azul/changelog.py
@@ -40,12 +40,14 @@ def title_first(item):

def write_changes(output_dir_path):
"""
Write the change log as a Python literal to a module in the given directory. We're using Python syntax because it
can be looked up and loaded very easily. See changes().
Write the change log as a Python literal to a module in the given directory.
We're using Python syntax because it can be looked up and loaded very
easily. See changes().
"""
with write_file_atomically(os.path.join(output_dir_path, module_name + '.py')) as f:
# Write each change as a single line. I tried pprint() but it reorders the keys in dictionaries and its line
# wrapping algorithm is creating a non-uniform output.
# Write each change as a single line. I tried pprint() but it reorders
# the keys in dictionaries and its line wrapping algorithm is creating a
# non-uniform output.
f.write(variable_name + ' = [\n')
for change in changelog()[variable_name]:
f.write(' ' + repr(change) + ',\n')
3 changes: 2 additions & 1 deletion src/azul/collections.py
@@ -49,7 +49,8 @@ def dict_merge(dicts: Iterable[Mapping]) -> Mapping:
V = TypeVar('V')


def explode_dict(d: Mapping[K, Union[V, list[V], set[V], tuple[V]]]) -> Iterable[dict[K, V]]:
def explode_dict(d: Mapping[K, Union[V, list[V], set[V], tuple[V]]]
) -> Iterable[dict[K, V]]:
"""
An iterable of dictionaries, one dictionary for every possible combination
of items from iterable values in the argument dictionary. Only instances of
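
The body of explode_dict is collapsed above. A minimal sketch of the behaviour the signature and docstring describe, where scalar values are kept as-is while list, set and tuple values fan out into one output dict per combination. This is an inference, not the repository's implementation:

from itertools import product
from typing import Iterable, Mapping, TypeVar, Union

K = TypeVar('K')
V = TypeVar('V')

def explode_dict(d: Mapping[K, Union[V, list[V], set[V], tuple[V]]]
                 ) -> Iterable[dict[K, V]]:
    # Treat only list, set and tuple values as iterable; wrap everything else
    choices = [
        v if isinstance(v, (list, set, tuple)) else [v]
        for v in d.values()
    ]
    for combination in product(*choices):
        yield dict(zip(d.keys(), combination))

assert list(explode_dict({'a': [1, 2], 'b': 3})) == [
    {'a': 1, 'b': 3},
    {'a': 2, 'b': 3},
]
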
10 changes: 8 additions & 2 deletions src/azul/compliance/fedramp_inventory_service.py
@@ -123,7 +123,11 @@ class Mapper(metaclass=ABCMeta):
def map(self, resource: ResourceConfig) -> Iterable[InventoryRow]:
raise NotImplementedError

def _common_fields(self, resource: ResourceConfig, *, id_suffix: Optional[str] = None) -> dict:
def _common_fields(self,
resource: ResourceConfig,
*,
id_suffix: Optional[str] = None
) -> dict:
return dict(
asset_tag=resource.name,
location=resource.region,
@@ -496,7 +500,9 @@ def get_resources(self) -> Iterator[ResourceConfig]:
yield from map(ResourceConfig.from_response, items)
resource_keys = response['unprocessedResourceKeys']

def get_inventory(self, resources: Iterable[ResourceConfig]) -> Iterable[InventoryRow]:
def get_inventory(self,
resources: Iterable[ResourceConfig]
) -> Iterable[InventoryRow]:
rows_by_mapper: defaultdict[Mapper, list[InventoryRow]] = defaultdict(list)
resource_counts = Counter()
row_counts = Counter()
6 changes: 5 additions & 1 deletion src/azul/deployment.py
@@ -261,7 +261,11 @@ def dss_checkout_bucket(self, dss_endpoint: str) -> str:
return self._dss_bucket(dss_endpoint, 'checkout', lambda_name='service')

@_cache
def _dss_bucket(self, dss_endpoint: str, *qualifiers: str, lambda_name: str) -> str:
def _dss_bucket(self,
dss_endpoint: str,
*qualifiers: str,
lambda_name: str
) -> str:
with self.direct_access_credentials(dss_endpoint, lambda_name):
stage = config.dss_deployment_stage(dss_endpoint)
name = f'/dcp/dss/{stage}/environment'

0 comments on commit 5746788