unit-test-and-ci-updates
Philip Adenekan committed Jan 21, 2022
1 parent a9bb879 commit 2d81289
Showing 15 changed files with 436 additions and 222 deletions.
23 changes: 23 additions & 0 deletions .editorconfig
@@ -0,0 +1,23 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab
23 changes: 23 additions & 0 deletions .github/workflows/pylint.yml
@@ -0,0 +1,23 @@
name: Pylint

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.6"]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pylint
      - name: Analysing the code with pylint
        run: |
          pylint rec_to_binaries/
25 changes: 25 additions & 0 deletions .github/workflows/unit_test.yml
@@ -0,0 +1,25 @@
name: Project Tests

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash -l {0}
    strategy:
      matrix:
        python-version: ["3.6"]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - uses: conda-incubator/setup-miniconda@v2
        with:
          miniconda-version: "latest"
          activate-environment: rec_to_binaries
          environment-file: environment.yml
      - run: pytest -vv
44 changes: 44 additions & 0 deletions .pylintrc
@@ -0,0 +1,44 @@
#options - https://github.com/Qiskit/openqasm/blob/master/.pylintrc
# https://pylint.pycqa.org/en/latest/technical_reference/features.html


[MESSAGES CONTROL]
disable=
    missing-docstring,
    too-few-public-methods,
    too-many-instance-attributes,
    line-too-long,
    too-many-arguments,
    logging-fstring-interpolation,
    consider-using-f-string,  # consider removing in the future
    import-error,  # consider removing after finding a better solution


[TYPECHECK]
ignored-modules = numpy, pandas, scipy.stats


[BASIC]
# Good variable names which should always be accepted, separated by a comma
good-names=_, id

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Regular expression matching correct attribute names
attr-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$

# Naming hint for attribute names
attr-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$

# Regular expression matching correct argument names
argument-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$

# Naming hint for argument names
argument-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$

# Regular expression matching correct variable names
variable-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$

# Naming hint for variable names
variable-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$
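
These naming regexes are deliberately permissive; a quick sanity check (probing with names that appear in this diff) shows that both UPPER_CASE and mixedCase names of up to 31 characters pass, so only length, not case style, is restricted:

import re

# The pattern used above for attributes, arguments, and variables.
name_rgx = re.compile(r'[A-Za-z_][A-Za-z0-9_]{0,30}$')

for probe in ['_', 'df', 'NANOSECONDS_TO_SECONDS', 'fieldsText']:
    print(probe, bool(name_rgx.match(probe)))  # all True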
5 changes: 5 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,5 @@
{
    "python.linting.pylintEnabled": true,
    "python.linting.enabled": true,
    "python.linting.flake8Enabled": false
}
1 change: 1 addition & 0 deletions environment.yml
@@ -14,6 +14,7 @@ dependencies:
   - pytest
   - pytest-cov
   - coveralls
+  - Faker
   - pip
   - pip:
     - mountainlab-pytools
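
Faker enters the environment to support the new unit tests; a minimal sketch of the kind of fixture data it can generate (the names and usage below are illustrative assumptions, not code from this commit):

from faker import Faker

fake = Faker()
Faker.seed(0)  # seed so generated fixtures are reproducible across CI runs

# e.g. fabricate an animal name and a date-stamped session directory name
animal = fake.first_name().lower()
date = fake.date(pattern='%Y%m%d')
session_dir = f'{date}_{animal}'  # e.g. '20140325_danielle' (depends on seed)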
18 changes: 9 additions & 9 deletions rec_to_binaries/adjust_timestamps.py
@@ -10,10 +10,10 @@

 import numpy as np
 import pandas as pd
+from scipy.stats import linregress
 from rec_to_binaries.create_system_time import infer_systime
 from rec_to_binaries.read_binaries import (readTrodesExtractedDataFile,
                                            write_trodes_extracted_datafile)
-from scipy.stats import linregress
 
 logger = getLogger(__name__)

@@ -53,28 +53,28 @@ def _regress_timestamps(trodestime, systime):
         Unix time
     """
-    NANOSECONDS_TO_SECONDS = 1E9
+    nanoseconds_to_seconds = 1E9
 
     # Convert
     systime_seconds = np.asarray(systime).astype(
-        np.float64) / NANOSECONDS_TO_SECONDS
+        np.float64) / nanoseconds_to_seconds
     trodestime_index = np.asarray(trodestime).astype(np.float64)
 
-    slope, intercept, r_value, p_value, std_err = linregress(
+    slope, intercept, _, _, _ = linregress(
         trodestime_index, systime_seconds)
     adjusted_timestamps = intercept + slope * trodestime_index
-    return (adjusted_timestamps * NANOSECONDS_TO_SECONDS).astype(np.int64)
+    return (adjusted_timestamps * nanoseconds_to_seconds).astype(np.int64)


-def _insert_new_data(data_file, df):
+def _insert_new_data(data_file, data_frame):
     """
     Replaces the `data` in the extracted data file with a new one.
     Parameters
     ----------
     data_file : dict
         Original data file as read in by `readTrodesExtractedDataFile`
-    df : pandas.DataFrame
+    data_frame : pandas.DataFrame
         New data
     Returns
@@ -84,7 +84,7 @@ def _insert_new_data(data_file, df):
"""
new_data_file = data_file.copy()
new_data_file['data'] = np.asarray(df.to_records(index=False))
new_data_file['data'] = np.asarray(data_frame.to_records(index=False))
new_data_file['fields'] = ''.join(
[f'<{name} {dtype}>'
for name, (dtype, _) in new_data_file['data'].dtype.fields.items()])
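
The `fields` header rebuilt here uses the Trodes `<name dtype>` notation. A small sketch of what the join produces for a hypothetical two-column frame (column names are illustrative):

import numpy as np
import pandas as pd

# Hypothetical columns resembling a continuoustime file.
data_frame = pd.DataFrame({'trodestime': np.zeros(2, dtype=np.uint32),
                           'systime': np.zeros(2, dtype=np.int64)})
records = np.asarray(data_frame.to_records(index=False))
fields = ''.join(f'<{name} {dtype}>'
                 for name, (dtype, _) in records.dtype.fields.items())
print(fields)  # '<trodestime uint32><systime int64>'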
@@ -113,7 +113,7 @@ def fix_timestamp_lag(continuoustime_filename):
     data_file = readTrodesExtractedDataFile(continuoustime_filename)
 
     if 'systime' not in data_file['data'].dtype.names:
-        logger.warn("No `systime`. Inferring from `system_time_at_creation` timestamp"
+        logger.warning("No `systime`. Inferring from `system_time_at_creation` timestamp"
                     " as a function of the `clockrate` and `trodestime`")
         new_data = infer_systime(data_file)
     else:
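To make the `_regress_timestamps` change concrete: the function treats the jittered `systime` values as noisy observations of a line in `trodestime`, so evaluating the fitted line at every sample gives back smooth wall-clock timestamps. A self-contained sketch with simulated jitter (hypothetical numbers, not project data):

import numpy as np
from scipy.stats import linregress

rng = np.random.default_rng(0)
trodestime = np.arange(0, 30_000, 100, dtype=np.float64)   # 30 kHz sample counts
true_seconds = 1.6e9 + trodestime / 30_000                 # ideal Unix time, seconds
observed_seconds = true_seconds + rng.normal(0, 1e-3, trodestime.size)  # MCU jitter

# Same recipe as _regress_timestamps: fit, then evaluate the line at every sample.
slope, intercept, _, _, _ = linregress(trodestime, observed_seconds)
adjusted_ns = ((intercept + slope * trodestime) * 1e9).astype(np.int64)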
19 changes: 10 additions & 9 deletions rec_to_binaries/core.py
@@ -34,6 +34,7 @@ def extract_trodes_rec_file(data_dir,
                             parallel_instances=1,
                             use_day_config=True,
                             trodes_version=None):
+    # pylint: disable=too-many-branches, too-many-locals, too-many-statements
     """Extracting Trodes rec files.
     Following the Frank Lab directory structure for raw ephys data, will
@@ -205,11 +206,11 @@
             use_day_config=use_day_config)
 
     if adjust_timestamps_for_mcu_lag:
-        ''''There is some jitter in the arrival times of packets from the MCU (as
-        reflected in the sysclock records in the .rec file. If we assume that
-        the Trodes clock is actually regular, and that any episodes of lag are
-        fairly sporadic, we can recover the correspondence between trodestime
-        and system (wall) time.'''
+        # There is some jitter in the arrival times of packets from the MCU (as
+        # reflected in the sysclock records in the .rec file). If we assume that
+        # the Trodes clock is actually regular, and that any episodes of lag are
+        # fairly sporadic, we can recover the correspondence between trodestime
+        # and system (wall) time.
         preprocessing_dir = animal_info.get_preprocessing_dir()
         filenames = glob.glob(os.path.join(
             preprocessing_dir, '**', '*.continuoustime.dat'), recursive=True)
@@ -243,9 +244,7 @@ def convert_binaries_to_hdf5(data_dir, animal, out_dir=None, dates=None,
                              convert_lfp=True,
                              convert_pos=True,
                              convert_spike=True):
-    animal_info = td.TrodesAnimalInfo(
-        data_dir, animal, out_dir=out_dir, dates=dates)
-    """Converting preprocessed binaries into HDF5 files.
+    '''Converting preprocessed binaries into HDF5 files.
     Assume that preprocessing has already been completed using (for example)
     extract_trodes_rec_file.
@@ -266,7 +265,9 @@
     convert_lfps : bool, optional
     convert_dio : bool, optional
     convert_mda : bool, optional
-    """
+    '''
+    animal_info = td.TrodesAnimalInfo(
+        data_dir, animal, out_dir=out_dir, dates=dates)
 
     importer = td.TrodesPreprocessingToAnalysis(animal_info)

4 changes: 2 additions & 2 deletions rec_to_binaries/create_system_time.py
@@ -41,14 +41,14 @@ def create_systime(clockrate, data, system_time_at_creation):
         millisecond
     """
-    NANOSECONDS_TO_SECONDS = 1e9
+    nanoseconds_to_seconds = 1e9
 
     clockrate = int(clockrate)
     n_time = data.shape[0]
     system_time_at_creation = pd.to_datetime(
         int(system_time_at_creation), unit='ms').value
     end = (system_time_at_creation +
-           int((n_time - 1) * NANOSECONDS_TO_SECONDS / clockrate))
+           int((n_time - 1) * nanoseconds_to_seconds / clockrate))
 
     systime = pd.date_range(
         start=system_time_at_creation,
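For reference, the inferred `systime` in `create_systime` is just an evenly spaced `pd.date_range`: it starts at the file's creation time and advances `1 / clockrate` seconds per sample. A sketch of the arithmetic with hypothetical values:

import pandas as pd

clockrate = 30_000                 # Trodes samples per second
n_time = 3                         # number of samples in the file
creation_ns = pd.to_datetime(1_600_000_000_000, unit='ms').value  # ms -> ns

end = creation_ns + int((n_time - 1) * 1e9 / clockrate)
systime = pd.date_range(start=creation_ns, end=end, periods=n_time)
# Three timestamps spaced 1 / 30000 s (about 33,333 ns) apart.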
18 changes: 9 additions & 9 deletions rec_to_binaries/read_binaries.py
@@ -4,7 +4,7 @@
 import numpy as np
 
 
-def readTrodesExtractedDataFile(filename):
+def readTrodesExtractedDataFile(filename):  # pylint: disable=invalid-name
     '''Read extracted trodes binary.
     Parameters
@@ -20,24 +20,24 @@ def readTrodesExtractedDataFile(filename):
         # Check if first line is start of settings block
         if file.readline().decode().strip() != '<Start settings>':
             raise Exception("Settings format not supported")
-        fieldsText = dict()
+        fields_text = {}
         for line in file:
             # Read through block of settings
             line = line.decode().strip()
             # filling in fields dict
             if line != '<End settings>':
                 settings_name, setting = line.split(': ')
-                fieldsText[settings_name.lower()] = setting
+                fields_text[settings_name.lower()] = setting
             # End of settings block, signal end of fields
             else:
                 break
         # Reads rest of file at once, using dtype format generated by parse_dtype()
         try:
-            fieldsText['data'] = np.fromfile(
-                file, dtype=parse_dtype(fieldsText['fields']))
+            fields_text['data'] = np.fromfile(
+                file, dtype=parse_dtype(fields_text['fields']))
         except KeyError:
-            fieldsText['data'] = np.fromfile(file)
-        return fieldsText
+            fields_text['data'] = np.fromfile(file)
+        return fields_text
 
 
 def parse_dtype(fieldstr):
@@ -46,7 +46,7 @@ def parse_dtype(fieldstr):
     Returns: np.dtype
     '''
     # Returns np.dtype from field string
-    sep = re.split('\s', re.sub(r"\>\<|\>|\<", ' ', fieldstr).strip())
+    sep = re.split('\s', re.sub(r"\>\<|\>|\<", ' ', fieldstr).strip())  # pylint: disable=anomalous-backslash-in-string
     typearr = []
 
     # Every two elements is fieldname followed by datatype
@@ -56,7 +56,7 @@
         ftype = 'uint32'
         # Finds if a <num>* is included in datatype
         if '*' in sep[i + 1]:
-            temptypes = re.split('\*', sep[i + 1])  # pylint: disable=anomalous-backslash-in-string
             # Results in the correct assignment, whether str is num*dtype or dtype*num
             ftype = temptypes[temptypes[0].isdigit()]
             repeats = int(temptypes[temptypes[1].isdigit()])
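A hypothetical round trip through these helpers, showing the field strings `parse_dtype` consumes, including the `num*dtype` repeat form handled by the branch above (the exact dtype reprs are expectations, not output from this commit):

from rec_to_binaries.read_binaries import parse_dtype

# e.g. a 'fields' header line from an extracted .dat file
print(parse_dtype('<time uint32><systime int64>'))
# expected: a structured dtype with fields 'time' (uint32) and 'systime' (int64)

print(parse_dtype('<data 4*int16>'))
# expected: a single 'data' field holding an int16 subarray repeated 4 times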
