diff --git a/src/sorcha/lightcurves/base_lightcurve.py b/src/sorcha/lightcurves/base_lightcurve.py index 374bad08..23f2176a 100644 --- a/src/sorcha/lightcurves/base_lightcurve.py +++ b/src/sorcha/lightcurves/base_lightcurve.py @@ -64,7 +64,7 @@ def _log_error_message(self, error_msg: str) -> None: Parameters ---------- - error_msg : str + error_msg : string The string to be appended to the error log """ logger.error(error_msg) diff --git a/src/sorcha/lightcurves/identity_lightcurve.py b/src/sorcha/lightcurves/identity_lightcurve.py index 31be68ec..ad45a4f4 100644 --- a/src/sorcha/lightcurves/identity_lightcurve.py +++ b/src/sorcha/lightcurves/identity_lightcurve.py @@ -50,7 +50,7 @@ def name_id() -> str: Returns ------- - str + string Unique identifier for this light curve calculator """ return "identity" diff --git a/src/sorcha/modules/PPAddUncertainties.py b/src/sorcha/modules/PPAddUncertainties.py index 5ab8c0e7..67acb530 100755 --- a/src/sorcha/modules/PPAddUncertainties.py +++ b/src/sorcha/modules/PPAddUncertainties.py @@ -29,13 +29,15 @@ def degCos(x): """ Calculate cosine of an angle in degrees. - Parameters: + Parameters ----------- - x (float): angle in degrees. + x : float + angle in degrees. - Returns: + Returns ----------- - The cosine of x. + float + The cosine of x. """ return np.cos(x * np.pi / 180.0) @@ -45,13 +47,15 @@ def degSin(x): """ Calculate sine of an angle in degrees. - Parameters: + Parameters ----------- - x (float): angle in degrees. + x : float + angle in degrees. - Returns: + Returns ----------- - The sine of x. + float + The sine of x. """ return np.sin(x * np.pi / 180.0) @@ -62,19 +66,25 @@ def addUncertainties(detDF, configs, module_rngs, verbose=True): Generates astrometric and photometric uncertainties, and SNR. Uses uncertainties to randomize the photometry. Accounts for trailing losses. - Parameters: + Parameters ----------- - detDF (Pandas dataframe): Dataframe of observations. 
+ detDF : Pandas dataframe + Dataframe of observations. - configs (dictionary): dictionary of configurations from config file. + configs : dictionary + dictionary of configurations from config file. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - Returns: - ----------- - detDF (Pandas dataframe): dataframe of observations, with new columns for observed - magnitudes, SNR, and astrometric/photometric uncertainties. + verbose: Boolean, optional + Verbose Logging Flag. Default = True + Returns + ----------- + detDF : Pandas dataframe + dataframe of observations, with new columns for observed + magnitudes, SNR, and astrometric/photometric uncertainties. """ pplogger = logging.getLogger(__name__) @@ -126,23 +136,48 @@ def uncertainties( """ Add astrometric and photometric uncertainties to observations. - Parameters: + Parameters ----------- - detDF (Pandas dataframe): dataframe containing observations. + detDF : Pandas dataframe + dataframe containing observations. - configs (dictionary): dictionary of configurations from config file. + configs : dictionary + dictionary of configurations from config file. - limMagName, seeingName, filterMagName, dra_name, ddec_name, dec_name (strings): column - names of the limiting magnitude, seeing, magnitude, RA rate, DEC rate and DEC. + limMagName : string, optional + pandas dataframe column name of the limiting magnitude. + Default = "fiveSigmaDepthAtSource" - Returns: - ----------- - astrSigDeg (numpy array): astrometric uncertainties in degrees. + seeingName : string, optional + pandas dataframe column name of the seeing + Default = "seeingFwhmGeom" + + filterMagName : string, optional + pandas dataframe column name of the object magnitude + Default = "TrailedSourceMag" - photometric_sigma (numpy array): photometric uncertainties in magnitude. 
+ dra_name : string, optional + pandas dataframe column name of the object RA rate + Default = "AstRARate(deg/day)" + + ddec_name: string, optional + pandas dataframe column name of the object declination rate + Default = "AstDecRate(deg/day)" + + dec_name : string, optional + pandas dataframe column name of the object declination + Default = "AstDec(deg)" + + Returns + ----------- + astrSigDeg: numpy array + astrometric uncertainties in degrees. - SNR (numpy array): signal-to-noise ratio. + photometric_sigma: numpy array + photometric uncertainties in magnitude. + SNR: numpy array + signal-to-noise ratio. """ if configs.get("trailing_losses_on", False): @@ -165,47 +200,68 @@ def calcAstrometricUncertainty( mag, m5, nvisit=1, FWHMeff=700.0, error_sys=10.0, astErrCoeff=0.60, output_units="mas" ): """Calculate the astrometric uncertainty, for object catalog purposes. - The effective FWHMeff MUST BE given in miliarcsec (NOT arcsec!). - Systematic error, error_sys, must be given in miliarcsec. - The result corresponds to a single-coordinate uncertainty. - Note that the total astrometric uncertainty (e.g. relevant when - matching two catalogs) will be sqrt(2) times larger. - Default values for parameters are based on estimates for LSST. - Parameters: + + Parameters ----------- - mag (float/array of floats): magnitude of the observation. + mag : float or array of floats + magnitude of the observation. - m5 (float/array of floats): 5-sigma limiting magnitude. + m5 : float or array of floats + 5-sigma limiting magnitude. - nvisit (int): number of visits to consider. + nvisit : int, optional + number of visits to consider. + Default = 1 - FWHMeff (float): effective Full Width at Half Maximum of Point Spread Function [mas]. + FWHMeff : float, optional + effective Full Width at Half Maximum of Point Spread Function [mas]. + Default = 700.0 - error_sys (float): systematic error [mas]. 
+ Default = 10.0 - output_units (string): 'mas' (default): milliarcseconds, 'arcsec': arcseconds. + astErrCoeff : float, optional + Astrometric error coefficient + (see calcRandomAstrometricErrorPerCoord description). + Default = 0.60 - Returns: + output_units : string, optional + Default: "mas" (milliarcseconds) + other options: "arcsec" (arcseconds) + + Returns ----------- - astrom_error (float/array of floats): astrometric error. + astrom_error : float or array of floats + astrometric error. - SNR (float/array of floats): signal to noise ratio. + SNR : float or array of floats + signal to noise ratio. - error_rand (float/array of floats): random error. + error_rand : float or array of floats + random error. - Description: + Notes ------------ + + The effective FWHMeff MUST BE given in milliarcsec (NOT arcsec!). + Systematic error, error_sys, must be given in milliarcsec. + The result corresponds to a single-coordinate uncertainty. + Note that the total astrometric uncertainty (e.g. relevant when + matching two catalogs) will be sqrt(2) times larger. + Default values for parameters are based on estimates for LSST. + The astrometric error can be applied to parallax or proper motion (for nvisit>1). If applying to proper motion, should also divide by the # of years of the survey. 
This is also referenced in the LSST overview paper (arXiv:0805.2366, ls.st/lop) + - assumes sqrt(Nvisit) scaling, which is the best-case scenario - calcRandomAstrometricError assumes maxiumm likelihood solution, which is also the best-case scenario - the systematic error, error_sys = 10 mas, corresponds to the design spec from the LSST Science Requirements Document (ls.st/srd) - """ # first compute SNR @@ -229,26 +285,35 @@ def calcAstrometricUncertainty( def calcRandomAstrometricErrorPerCoord(FWHMeff, SNR, AstromErrCoeff=0.60): """Calculate the random astrometric uncertainty, as a function of - effective FWHMeff and signal-to-noise ratio SNR - Returns astrometric uncertainty in the same units as FWHM. + effective FWHMeff and signal-to-noise ratio SNR and return + the astrometric uncertainty in the same units as FWHM. + ** This error corresponds to a single-coordinate error ** the total astrometric uncertainty (e.g. relevant when matching two catalogs) will be sqrt(2) times larger. - Parameters: + Parameters ----------- - FWHMeff (float/array of floats): Effective Full Width at Half Maximum of Point Spread Function [mas]. + FWHMeff : float or array of floats + Effective Full Width at Half Maximum of Point Spread Function [mas]. - SNR (float/array of floats): Signal-to-noise ratio. + SNR : float or array of floats + Signal-to-noise ratio. - AstromErrCoeff (float): Astrometric error coefficient (see description below). + AstromErrCoeff : float, optional + Astrometric error coefficient (see description below). + Default =0.60 - Returns: + Returns ----------- - RandomAstrometricErrorPerCoord (float/array of floats): random astrometric uncertainty per coordinate. + RandomAstrometricErrorPerCoord: float or array of floats + random astrometric uncertainty per coordinate. - Description: + Returns astrometric uncertainty in the same units as FWHMeff. 
+ + Notes ------------ + The coefficient AstromErrCoeff for Maximum Likelihood solution is given by @@ -281,14 +346,15 @@ def calcPhotometricUncertainty(snr): """ Convert flux signal to noise ratio to an uncertainty in magnitude. - Parameters: + Parameters ----------- - snr (float/array of floats): The signal-to-noise-ratio in flux. + snr : float or array of floats + The signal-to-noise-ratio in flux. - Returns: + Returns ----------- - magerr (float/array of floats): The resulting uncertainty in magnitude. - + magerr : float or array of floats + The resulting uncertainty in magnitude. """ # see e.g. www.ucolick.org/~bolte/AY257/s_n.pdf section 3.1 diff --git a/src/sorcha/modules/PPApplyColourOffsets.py b/src/sorcha/modules/PPApplyColourOffsets.py index 83a65bf8..65b404c3 100644 --- a/src/sorcha/modules/PPApplyColourOffsets.py +++ b/src/sorcha/modules/PPApplyColourOffsets.py @@ -11,21 +11,30 @@ def PPApplyColourOffsets(observations, function, othercolours, observing_filters If phase model variables exist for each colour, this function also selects the correct variables for each observation based on filter. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations. + observations: Pandas dataframe + dataframe of observations. - function (string): string of desired phase function model. Options are HG, HG12, HG1G2, linear, H. + function : string + string of desired phase function model. Options are HG, HG12, HG1G2, linear, H. - othercolours (list of strings): list of colour offsets present in input files. + othercolours : list of strings + list of colour offsets present in input files. - observing_filters (list of strings): list of observation filters of interest. + observing_filters : list of strings + list of observation filters of interest. - mainfilter (string): the main filter in which H is given and all colour offsets are calculated against. 
+ mainfilter : string + the main filter in which H is given and all colour offsets are calculated against. - Returns: + Returns ----------- - observations (Pandas dataframe): dataframe of observations with H calculated in relevant filter. + observations : Pandas dataframe + observations dataframe modified with H calculated in relevant filter (H_filter) + and renames the column for H in the main filter as H_original. + The dataframe has also been modified to have the appropriate phase curve filter specific values/columns. + """ diff --git a/src/sorcha/modules/PPApplyFOVFilter.py b/src/sorcha/modules/PPApplyFOVFilter.py index 7e32f885..0eed296e 100755 --- a/src/sorcha/modules/PPApplyFOVFilter.py +++ b/src/sorcha/modules/PPApplyFOVFilter.py @@ -7,26 +7,35 @@ def PPApplyFOVFilter(observations, configs, module_rngs, footprint=None, verbose=False): """ - Wrapper function for PPFootprintFilter and PPFilterDetectionEfficiency. Checks to see + Wrapper function for PPFootprintFilter and PPFilterDetectionEfficiency that checks to see whether a camera footprint filter should be applied or if a simple fraction of the - circular footprint should be used, then applies the required filter. + circular footprint should be used, then applies the required filter where rows + are removed from the inputted pandas dataframe for moving objects that land outside of + their associated observation's footprint. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations. + observations: Pandas dataframe + dataframe of observations. - configs (dictionary): dictionary of variables from config file. + configs : dictionary + dictionary of variables from config file. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - footprint (Footprint): A Footprint object that represents the boundaries of - the detector(s). Default `None`. 
+ footprint: Footprint + A Footprint class object that represents the boundaries of the detector(s). + Default: None. - verbose (boolean): Verbose mode on or off. + verbose: boolean + Controls whether logging in verbose mode is on or off. + Default: False - Returns: + Returns ----------- - observations (Pandas dataframe): dataframe of observations after FOV filters have been applied. + observations : Pandas dataframe + dataframe of observations updated after field-of-view filters have been applied. """ pplogger = logging.getLogger(__name__) @@ -59,20 +68,25 @@ def PPGetSeparation(obj_RA, obj_Dec, cen_RA, cen_Dec): """ Function to calculate the distance of an object from the field centre. - Parameters: + Parameters ----------- - obj_RA (float): RA of object in decimal degrees. + obj_RA : float + RA of object in decimal degrees. - obj_Dec (float): Dec of object in decimal degrees. + obj_Dec: float + Dec of object in decimal degrees. - cen_RA (float): RA of field centre in decimal degrees. + cen_RA : float + RA of field centre in decimal degrees. - cen_Dec (float): Dec of field centre in decimal degrees. + cen_Dec : float + Dec of field centre in decimal degrees. - Returns: + Returns ----------- - sep_degree (float): The separation of the object from the centre of the field, in decimal - degrees. + sep_degree : float + The separation of the object from the centre of the field, in decimal + degrees. """ @@ -89,16 +103,18 @@ def PPCircleFootprint(observations, circle_radius): Simple function which removes objects which lay outside of a circle of given radius centred on the field centre. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations. + observations : Pandas dataframe + dataframe of observations. - circle_radius (float): radius of circle footprint in degrees. + circle_radius : float + radius of circle footprint in degrees. 
- Returns: + Returns ---------- - new_observations (Pandas dataframe): dataframe of observations with all lying - beyond the circle radius dropped. + new_observations : Pandas dataframe + dataframe of observations with all lying beyond the circle radius dropped. """ @@ -126,17 +142,23 @@ def PPSimpleSensorArea(ephemsdf, module_rngs, fillfactor=0.9): Randomly removes a number of observations proportional to the fraction of the field not covered by the detector. - Parameters: + Parameters ----------- - ephemsdf (Pandas dataframe): dataframe containing observations. + ephemsdf : Pandas dataframe + Dataframe containing observations. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - fillfactor (float): fraction of FOV covered by the sensor. + fillfactor : float + fraction of FOV covered by the sensor. + Default = 0.9 - Returns: + Returns ---------- - ephemsOut (Pandas dataframe): dataframe of observations with fraction removed. + ephemsOut : Pandas dataframe + Dataframe of observations with 1- fillfactor fraction of objects + removed per on-sky observation pointing. """ # Set the module specific seed as an offset from the base seed. diff --git a/src/sorcha/modules/PPBrightLimit.py b/src/sorcha/modules/PPBrightLimit.py index 7c509c24..12a1ef51 100755 --- a/src/sorcha/modules/PPBrightLimit.py +++ b/src/sorcha/modules/PPBrightLimit.py @@ -8,17 +8,23 @@ def PPBrightLimit(observations, observing_filters, bright_limit): limit. Can take either a single saturation limit for a straight cut, or filter-specific saturation limits. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations. + observations : Pandas dataframe + Dataframe of observations. - observing_filters (list of strings): observing filters present in the data. + observing_filters : list of strings + Observing filters present in the data. 
- bright_limit (float or list of floats): saturation limits: either single or per-filter. + bright_limit : float or list of floats + Saturation limits: either single value applied to all filters or a list of values for each filter. - Returns: + Returns ---------- - observations_out (Pandas dataframe): dataframe of filtered observations. + observations_out : Pandas dataframe + observations dataframe modified with rows dropped for apparent + magnitudes brighter than the bright_limit for the given observation's + filter """ diff --git a/src/sorcha/modules/PPCalculateApparentMagnitude.py b/src/sorcha/modules/PPCalculateApparentMagnitude.py index b1a3dca4..c68085f3 100644 --- a/src/sorcha/modules/PPCalculateApparentMagnitude.py +++ b/src/sorcha/modules/PPCalculateApparentMagnitude.py @@ -16,29 +16,44 @@ def PPCalculateApparentMagnitude( ): """This function applies the correct colour offset to H for the relevant filter, checks to make sure the correct columns are included (with additional functionality for colour-specific phase curves), - then calculates the apparent magnitude. + then calculates the trailed source apparent magnitude including optional adjustments for + cometary activity and rotational light curves. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations. + observations : Pandas dataframe + dataframe of observations. - phasefunction (string): desired phase function model. Options are HG, HG12, HG1G2, linear, H. + phasefunction : string + Desired phase function model. Options are HG, HG12, HG1G2, linear, none - mainfilter (string): the main filter in which H is given and all colour offsets are calculated against. + mainfilter : string + The main filter in which H is given and all colour offsets are calculated against. - othercolours (list of strings): list of colour offsets present in input files. + othercolours : list of strings + List of colour offsets present in input files. 
- observing_filters (list of strings): list of observation filters of interest. + observing_filters : list of strings + List of observation filters of interest. - cometary_activity_choice (string): type of object for cometary activity. Either 'comet' or 'none'. + cometary_activity_choice : string + Choice of cometary activity model. + Default = None - lc_choice (string): choice of lightcurve model. Default None + lc_choice : string + Choice of lightcurve model. Default = None - verbose (boolean): True/False trigger for verbosity. + verbose : boolean + Flag for turning on verbose logging. Default = False - Returns: + Returns ---------- - observations (Pandas dataframe): dataframe of observations with calculated magnitude column. + observations : Pandas dataframe + Modified observations pandas dataframe with calculated trailed source + apparent magnitude column, H calculated in relevant filter (H_filter), + renames the column for H in the main filter as H_original and + adds a column for the light curve contribution to the trailed source + apparent magnitude (if included) """ pplogger = logging.getLogger(__name__) diff --git a/src/sorcha/modules/PPCalculateApparentMagnitudeInFilter.py b/src/sorcha/modules/PPCalculateApparentMagnitudeInFilter.py index ca4e7555..27b54144 100755 --- a/src/sorcha/modules/PPCalculateApparentMagnitudeInFilter.py +++ b/src/sorcha/modules/PPCalculateApparentMagnitudeInFilter.py @@ -17,34 +17,47 @@ def PPCalculateApparentMagnitudeInFilter( cometary_activity_choice=None, ): """ - This task calculates the apparent brightness of an object at a given pointing - according to one of the following photometric phase function models: + The trailed source apparent magnitude is calculated in the filter for given H, + phase function, light curve, and cometary activity parameters. + + Notes + ------- + PPApplyColourOffsets should be run beforehand to apply any needed colour offset to H and ensure correct + variables are present. 
+ + The phase function model options utilized are the sbpy package's implementation: - HG: Bowell et al. (1989) Asteroids II book. - HG1G2: Muinonen et al. (2010) Icarus 209 542. - HG12: Penttilä et al. (2016) PSS 123 117. - linear: (as implemented in sbpy) + - none : No model is applied - The apparent magnitude is calculated in the filter for which the H and - phase function variables are given. PPApplyColourOffsets should be - run beforehand to apply any needed colour offset to H and ensure correct - variables are present. - - The function makes use of implementations in the sbpy library. - Parameters: + Parameters ----------- - padain (Pandas dataframe): dataframe of observations. + padain : Pandas dataframe + Dataframe of observations. + + function : string + Desired phase function model. Options are "HG", "HG12", "HG1G2", "linear", "none". - function (string): desired phase function model. Options are HG, HG12, HG1G2, linear, none. + colname : string + Column name in which to store calculated magnitude to the padain dataframe. + Default = "TrailedSourceMag" - colname (string): column name in which to store calculated magnitude. + lightcurve_choice : string, optional + Choice of light curve model. Default = None - lightcurve_choice (string): choice of lightcurve model. Default None + cometary_activity_choice : string, optional + Choice of cometary activity model. Default = None - Returns: + Returns ---------- - padain (Pandas dataframe): dataframe of observations with calculated magnitude column. + padain : Pandas dataframe + Dataframe of observations (padain) modified with calculated trailed + source apparent magnitude column and any optional cometary activity or + light curve added columns based on the models used. 
""" diff --git a/src/sorcha/modules/PPCalculateSimpleCometaryMagnitude.py b/src/sorcha/modules/PPCalculateSimpleCometaryMagnitude.py index e0d5d35a..c9f16354 100755 --- a/src/sorcha/modules/PPCalculateSimpleCometaryMagnitude.py +++ b/src/sorcha/modules/PPCalculateSimpleCometaryMagnitude.py @@ -11,11 +11,8 @@ def PPCalculateSimpleCometaryMagnitude( alpha: List[float], activity_choice: str = None, ) -> pd.DataFrame: - """This task calculates the brightness of the comet at a given pointing - using the model specified by `activity_choice` - - The brightness is calculated first in the main filter, and the colour offset is - applied afterwards. + """Adjusts the observations' trailed source apparent magnitude for cometary activity + using the model specified by `activity_choice` added by the user Parameters ---------- @@ -30,13 +27,14 @@ def PPCalculateSimpleCometaryMagnitude( Distance to Earth [units au] alpha : List[float] Phase angle [units degrees] - activity_choice : str, optional + activity_choice : string, optional The activity model to use, by default None Returns ------- pd.DataFrame - The ``observations`` dataframe with updated brightness values. + The ``observations`` dataframe with updated trailed + source apparent magnitude values. """ if activity_choice and CA_METHODS.get(activity_choice, False): diff --git a/src/sorcha/modules/PPCheckInputObjectIDs.py b/src/sorcha/modules/PPCheckInputObjectIDs.py index 4221ea58..f861c646 100755 --- a/src/sorcha/modules/PPCheckInputObjectIDs.py +++ b/src/sorcha/modules/PPCheckInputObjectIDs.py @@ -6,20 +6,27 @@ def PPCheckInputObjectIDs(orbin, colin, poiin): """ Checks whether orbit and physical parameter files contain the same object IDs, and - additionally checks if the pointing database object iIDs is a subset of - all the object id:s found in the orbit/physical parameter files.} + additionally checks if the pointing database object IDs is a subset of + all the object ids found in the orbit/physical parameter files. 
- Parameters: + Parameters ----------- - orbin (Pandas dataframe): dataframe of orbital information. + orbin : Pandas dataframe + Dataframe of orbital information. - colin (Pandas dataframe): dataframe of physical parameters. + colin : Pandas dataframe + Dataframe of physical parameters. - poiin (Pandas dataframe): dataframe of pointing database. + poiin : Pandas dataframe + Dataframe of pointing database. - Returns: + Returns ---------- - None: will error out if a mismatch is found. + None + + Notes + ------- + Function will error out if a mismatch is found. """ diff --git a/src/sorcha/modules/PPCommandLineParser.py b/src/sorcha/modules/PPCommandLineParser.py index 3e548c91..1dc72c39 100644 --- a/src/sorcha/modules/PPCommandLineParser.py +++ b/src/sorcha/modules/PPCommandLineParser.py @@ -15,9 +15,9 @@ def warn_or_remove_file(filepath, force_remove, pplogger): Parameters ---------- - filepath : str + filepath : string The full file path to a given file. i.e. /home/data/output.csv - force_remove : bool + force_remove : boolean Whether to remove the file if it exists. pplogger : Logger Used to log the output. @@ -42,13 +42,15 @@ def PPCommandLineParser(args): Will only look for the comet parameters file if it's actually given at the command line. 
- Parameters: + Parameters ----------- - args (ArgumentParser object): argparse object of command line arguments + args : ArgumentParser object + argparse object of command line arguments - Returns: + Returns ---------- - cmd_args_dict (dictionary): dictionary of variables taken from command line arguments + cmd_args_dict : dictionary + dictionary of variables taken from command line arguments """ diff --git a/src/sorcha/modules/PPConfigParser.py b/src/sorcha/modules/PPConfigParser.py index 83ea6ebb..9e5cf330 100755 --- a/src/sorcha/modules/PPConfigParser.py +++ b/src/sorcha/modules/PPConfigParser.py @@ -13,8 +13,13 @@ def log_error_and_exit(message: str) -> None: Parameters ---------- - message : str + message : string The error message to be logged to the error output file. + + + Returns + -------- + None """ logging.error(message) @@ -26,17 +31,21 @@ def PPGetOrExit(config, section, key, message): Checks to see if the config file parser has a key. If it does not, this function errors out and the code stops. - Parameters: + Parameters ----------- - config (ConfigParser object): ConfigParser object containing configs. + config : ConfigParser + ConfigParser object containing configs. - section (string): section of the key being checked. + section : string + Section of the key being checked. - key (string): the key being checked. + key : string + The key being checked. - message (string): the message to log and display if the key is not found. + message : string + The message to log and display if the key is not found. - Returns: + Returns ---------- None. @@ -54,17 +63,21 @@ def PPGetFloatOrExit(config, section, key, message): Checks to see if a key in the config parser is present and can be read as a float. If it cannot, this function errors out and the code stops. - Parameters: + Parameters ----------- - config (ConfigParser object): ConfigParser object containing configs. + config : ConfigParser + ConfigParser object containing configs. 
- section (string): section of the key being checked. + section : string + section of the key being checked. - key (string): the key being checked. + key : string + The key being checked. - message (string): the message to log and display if the key is not found. + message : string + The message to log and display if the key is not found. - Returns: + Returns ---------- None. @@ -91,17 +104,21 @@ def PPGetIntOrExit(config, section, key, message): Checks to see if a key in the config parser is present and can be read as an int. If it cannot, this function errors out and the code stops. - Parameters: + Parameters ----------- - config (ConfigParser object): ConfigParser object containing configs. + config : ConfigParser + ConfigParser object containing configs. - section (string): section of the key being checked. + section : string + Section of the key being checked. - key (string): the key being checked. + key : string + The key being checked. - message (string): the message to log and display if the key is not found. + message : string + The message to log and display if the key is not found. - Returns: + Returns ---------- None. @@ -128,17 +145,21 @@ def PPGetBoolOrExit(config, section, key, message): Checks to see if a key in the config parser is present and can be read as a Boolean. If it cannot, this function errors out and the code stops. - Parameters: + Parameters ----------- - config (ConfigParser object): ConfigParser object containing configs. + config : ConfigParser object + ConfigParser object containing configs. - section (string): section of the key being checked. + section : string + Section of the key being checked. - key (string): the key being checked. + key : string + The key being checked. - message (string): the message to log and display if the key is not found. + message : string + The message to log and display if the key is not found. - Returns: + Returns ---------- None. 
@@ -162,24 +183,30 @@ def PPGetValueAndFlag(config, section, key, type_wanted): type and error-handling if it can't be forced. If the value is not present in the config fie, the flag is set to False; if it is, the flag is True. - Parameters: + Parameters ----------- - config (ConfigParser object): ConfigParser object containing configs. + config : ConfigParser + ConfigParser object containing configs. - section (string): section of the key being checked. + section : string + Section of the key being checked. - key (string): the key being checked. + key : string + The key being checked. - type_wanted (string): the type the value should be forced to. Accepts int, - float, none (for no type-forcing). + type_wanted : string + The type the value should be forced to. + Accepts int, float, none (for no type-forcing). - Returns: + Returns ---------- - value (any type): the value of the key, with type dependent on type_wanted. - Will be None if the key is not present. + value : any type + The value of the key, with type dependent on type_wanted. + Will be None if the key is not present. - flag (Boolean): will be False if the key is not present in the config file - and True if it is. + flag : boolean + Will be False if the key is not present in the config file + and True if it is. """ @@ -221,16 +248,18 @@ def PPFindFileOrExit(arg_fn, argname): """Checks to see if a file given by a filename exists. If it doesn't, this fails gracefully and exits to the command line. - Parameters: + Parameters ----------- - arg_fn (string): the filepath/name of the file to be checked. + arg_fn : string + The filepath/name of the file to be checked. - argname (string): the name of the argument being checked. Used for error - message. + argname : string + The name of the argument being checked. Used for error message. - Returns: + Returns ---------- - arg_fn (string): the filepath/name of the file to be checked. + arg_fn : string + The filepath/name of the file to be checked. 
""" @@ -247,17 +276,18 @@ def PPFindDirectoryOrExit(arg_fn, argname): """Checks to see if a directory given by a filepath exists. If it doesn't, this fails gracefully and exits to the command line. - Parameters: + Parameters ----------- - arg_fn (string): the filepath of the directory to be checked. + arg_fn : string + The filepath of the directory to be checked. - argname (string): the name of the argument being checked. Used for error - message. + argname : string + The name of the argument being checked. Used for error message. - Returns: + Returns ---------- - arg_fn (string): the filepath of the directory to be checked. - + arg_fn : string + The filepath of the directory to be checked. """ pplogger = logging.getLogger(__name__) @@ -272,20 +302,25 @@ def PPFindDirectoryOrExit(arg_fn, argname): def PPCheckFiltersForSurvey(survey_name, observing_filters): """ When given a list of filters, this function checks to make sure they exist in the - user-selected survey. Currently only has options for LSST, but can be expanded upon - later. If the filters given in the config file do not match the survey filters, - the function exits the program with an error. + user-selected survey, and if the filters given in the config file do not match the + survey filters, the function exits the program with an error. - Parameters: + Parameters ----------- - survey_name (string): survey name. Currently only "LSST", "lsst" accepted. + survey_name : string + Survey name. Currently only "LSST", "lsst" accepted. - observing_filters (list of strings): observation filters of interest. + observing_filters: list of strings + Observation filters of interest. - Returns: + Returns ---------- None. + Notes + ------- + Currently only has options for LSST, but can be expanded upon later. 
+ """ pplogger = logging.getLogger(__name__) @@ -315,20 +350,23 @@ def PPConfigFileParser(configfile, survey_name): Parses the config file, error-handles, then assigns the values into a single dictionary, which is passed out. - Chose not to use the original ConfigParser object for readability: it's a dict of - dicts, so calling the various values can become quite unwieldy. - - This could easily be broken up even more, and probably should be. - - Parameters: + Parameters ----------- - configfile (string): filepath/name of config file. + configfile : string + Filepath/name of config file. - survey_name (string): survey name. Currently only "LSST", "lsst" accepted. + survey_name : string + Survey name. Currently only "LSST", "lsst" accepted. - Returns: + Returns ---------- - config_dict (dictionary): dictionary of config file variables. + config_dict : dictionary + Dictionary of config file variables. + + Notes + ------- + We chose not to use the original ConfigParser object for readability: it's a dict of + dicts, so calling the various values can become quite unwieldy. """ pplogger = logging.getLogger(__name__) @@ -712,13 +750,15 @@ def PPPrintConfigsToLog(configs, cmd_args): """ Prints all the values from the config file and command line to the log. - Parameters: + Parameters ----------- - configs (dictionary): dictionary of config file variables. + configs : dictionary + Dictionary of config file variables. - cmd_args (dictionary): dictionary of command line arguments. + cmd_args : dictionary + Dictionary of command line arguments. - Returns: + Returns ---------- None. 
diff --git a/src/sorcha/modules/PPDetectionEfficiency.py b/src/sorcha/modules/PPDetectionEfficiency.py index 09f5f1d8..dd630a8b 100644 --- a/src/sorcha/modules/PPDetectionEfficiency.py +++ b/src/sorcha/modules/PPDetectionEfficiency.py @@ -10,18 +10,21 @@ def PPDetectionEfficiency(padain, threshold, module_rngs): threshold: if the threshold is 0.95, for example, 5% of observations will be randomly dropped. Used by PPLinkingFilter. - Parameters: + Parameters ----------- - padain (Pandas dataframe): dataframe of observations. + padain : Pandas dataframe + Dataframe of observations. - threshold (float): Fraction between 0 and 1 of detections retained in the dataframe. + threshold : float + Fraction between 0 and 1 of detections retained in the dataframe. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - Returns: + Returns ---------- - padain_drop: dataframe of observations with a fraction equal to 1-threshold - randomly dropped. + : Pandas dataframe + Dataframe of observations with a fraction equal to 1-threshold randomly dropped. """ diff --git a/src/sorcha/modules/PPDetectionProbability.py b/src/sorcha/modules/PPDetectionProbability.py index 044e4319..e02c1d42 100755 --- a/src/sorcha/modules/PPDetectionProbability.py +++ b/src/sorcha/modules/PPDetectionProbability.py @@ -26,19 +26,24 @@ def calcDetectionProbability(mag, limmag, fillFactor=1.0, w=0.1): limiting magnitude, and fill factor, determined by the fading function from Veres & Chesley (2017). - Parameters: + Parameters ----------- - mag (float/array of floats): magnitude of object in filter used for that field. + mag : float or array of floats + Magnitude of object in filter used for that field. - limmag (float/array of floats): limiting magnitude of the field. + limmag : float or array of floats + Limiting magnitude of the field. 
- fillFactor (float): fraction of FOV covered by the camera sensor. + fillFactor : float, optional + Fraction of FOV covered by the camera sensor. Default = 1.0 - w (float): distribution parameter. + w : float + Distribution parameter. Default = 0.1 - Returns: + Returns ---------- - P (float/array of floats): probability of detection. + P : float or array of floats + Probability of detection. """ P = fillFactor / (1.0 + np.exp((mag - limmag) / w)) @@ -61,22 +66,35 @@ def PPDetectionProbability( Wrapper for calcDetectionProbability which takes into account column names and trailing losses. Used by PPFadingFunctionFilter. - Parameters: + Parameters ----------- - oif_df (Pandas dataframe): dataframe of observations. + oif_df : Pandas dataframe + Dataframe of observations. - trailing_losses (Boolean): are trailing losses being applied? + trailing_losses : Boolean, optional + Are trailing losses being applied? Default = False - *_name (string): Column names for trailing losses, magnitude, limiting magnitude - and field ID respectively. + trailing_loss_name : string, optional + oif_df column name for trailing losses, Default = dmagDetect - fillFactor (float): fraction of FOV covered by the camera sensor. + magnitude_name : string, optional + oif_df column name for observation limiting magnitude, + Default = fiveSigmaDepthAtSource - w (float): distribution parameter. + field_id_name : string, optional + oif_df column name for observation field_id + Default = FieldID - Returns: + fillFactor : float, optional + Fraction of FOV covered by the camera sensor. Default = 1.0 + + w : float, optional + Distribution parameter. Default = 0.1 + + Returns ---------- - P (float/array of floats): probability of detection. + : float or array of floats + Probability of detection.
""" diff --git a/src/sorcha/modules/PPDropObservations.py b/src/sorcha/modules/PPDropObservations.py index 44bd5fbb..5ad2f56c 100644 --- a/src/sorcha/modules/PPDropObservations.py +++ b/src/sorcha/modules/PPDropObservations.py @@ -6,17 +6,21 @@ def PPDropObservations(observations, module_rngs, probability="detection probabi Drops rows where the probabilty of detection is less than sample drawn from a uniform distribution. Used by PPFadingFunctionFilter. - Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations with a column containing the probability of detection. + observations : Pandas dataframe + Dataframe of observations with a column containing the probability of detection. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - probability (string): name of column containing detection probability. + probability : string + Name of column containing detection probability. - Returns: + Returns ---------- - out (Pandas dataframe): new dataframe without observations that could not be observed. + out : Pandas dataframe + New dataframe of 'observations' modified to remove observations that could not be observed. """ # Set the module specific seed as an offset from the base seed. diff --git a/src/sorcha/modules/PPFadingFunctionFilter.py b/src/sorcha/modules/PPFadingFunctionFilter.py index af424362..cb0449e7 100644 --- a/src/sorcha/modules/PPFadingFunctionFilter.py +++ b/src/sorcha/modules/PPFadingFunctionFilter.py @@ -12,18 +12,24 @@ def PPFadingFunctionFilter(observations, fillfactor, width, module_rngs, verbose Calculates detection probability based on a fading function, then drops rows where the probabilty of detection is less than sample drawn from a uniform distribution. 
- Parameters: + Parameters ----------- - observations (Pandas dataframe): dataframe of observations with a column containing the probability of detection. + observations : Pandas dataframe + Dataframe of observations with a column containing the probability of detection. - fillFactor (float): fraction of FOV covered by the camera sensor. + fillFactor : float + Fraction of camera field-of-view covered by detectors - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). - Returns: - ---------- - observations_drop (Pandas dataframe): new dataframe without observations that could not be observed. + verbose : boolean, optional + Verbose logging flag. Default = False + Returns + ---------- + observations_drop : Pandas dataframe + Modified 'observations' dataframe without observations that could not be observed. """ pplogger = logging.getLogger(__name__) diff --git a/src/sorcha/modules/PPFootprintFilter.py b/src/sorcha/modules/PPFootprintFilter.py index a935678a..d4f7fa3f 100644 --- a/src/sorcha/modules/PPFootprintFilter.py +++ b/src/sorcha/modules/PPFootprintFilter.py @@ -35,21 +35,27 @@ def distToSegment(points, x0, y0, x1, y1): units as the points are specified in (radians, degrees, etc.). Uses planar geometry for the calculations (assuming small angular distances). - Parameters: + Parameters ----------- - points (array): array of shape (2, n) describing the corners of the sensor. + points : array + Array of shape (2, n) describing the corners of the sensor. - x0 (float): the x coordinate of the first end of the segment. + x0 : float + The x coordinate of the first end of the segment. - y0 (float): the y coordinate of the first end of the segment. + y0 : float + The y coordinate of the first end of the segment. - x1 (float): the x coordinate of the second end of the segment. + x1 : float + The x coordinate of the second end of the segment.
- y1 (float): the y coordinate of the second end of the segment. + y1 : float + The y coordinate of the second end of the segment. - Returns: + Returns -------- - dist (array): array of length n storing the distances. + dist : array + Array of length n storing the distances. """ # Handle the case where the segment is a point: (x0 == x1) and (y0 == y1) len_sq = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0) @@ -68,22 +74,28 @@ def distToSegment(points, x0, y0, x1, y1): class Detector: + """Detector class""" + def __init__(self, points, ID=0, units="radians"): """ Initiates a detector object. - Parameters: + Parameters ----------- - points (array): array of shape (2, n) describing the corners of the sensor. + points : array + Array of shape (2, n) describing the corners of the sensor. - ID (int): an integer ID for the sensor. + ID : int, optional + An integer ID for the sensor. Default = 0. - units (string): units that points is provided in, "radians" or "degrees" from - center of the focal plane. + units : string, optional + Units that points is provided in, "radians" or "degrees" from + center of the focal plane. Default = "radians" - Returns: + Returns ---------- - detector (Detector): a detector instance. + detector : Detector + A Detector object instance. """ @@ -115,21 +127,26 @@ def ison(self, point, ϵ=10.0 ** (-11), edge_thresh=None, plot=False): Determines whether a point (or array of points) falls on the detector. - Parameters: + Parameters ----------- - point (array): array of shape (2, n) for n points. + point : array + Array of shape (2, n) for n points. - ϵ (float): threshold for whether point is on detector. + ϵ : float, optional + Threshold for whether point is on detector. Default: 10.0 ** (-11) - edge_thresh (float, optional): The focal plane distance (in arcseconds) from - the detector's edge for a point to be counted. Removes points that are too - close to the edge for source detection.
+ edge_thresh: float, optional + The focal plane distance (in arcseconds) from the detector's edge + for a point to be counted. Removes points that are too + close to the edge for source detection. Default = None - plot (Boolean): whether to plot the detector and the point. + plot : Boolean, optional + Flag for whether to plot the detector and the point. Default = False - Returns: + Returns ---------- - ison (array): indices of points in point array that fall on the sensor. + selectedidx : array + Indices of points in point array that fall on the sensor. """ pplogger = logging.getLogger(__name__) @@ -183,13 +200,14 @@ def trueArea(self): segmentedArea, but the test point is the mean of the corner coordinates. Will probably fail if the sensor is not convex. - Parameters: + Parameters ----------- None. - Returns: + Returns ---------- - area (float): the area of the detector. + area : float + The area of the detector. """ x = self.x - self.centerx @@ -210,14 +228,15 @@ def segmentedArea(self, point): Fails if the point is not inside the sensor or if the sensor is not convex. - Parameters: + Parameters ----------- - None. + point : array + Array of shape (2, n) for n points. - Returns: + Returns ---------- - area (float): the area of the detector. - + area : float + The area of the detector. """ # so that both a single and many points work @@ -247,11 +266,11 @@ def sortCorners(self): Sorts the corners to be counterclockwise by angle from center of the detector. Modifies self. - Parameters: + Parameters ----------- None. - Returns: + Returns ---------- None. @@ -269,13 +288,15 @@ def rotateDetector(self, θ): Rotates a sensor around the origin of the coordinate system its corner locations are provided in. - Parameters: + Parameters ----------- - θ (float): angle to rotate by, in radians. + θ : float + Angle to rotate by, in radians. - Returns: + Returns ---------- - Detector: new Detector instance. + Detector: Detector + New Detector instance. 
""" @@ -293,11 +314,11 @@ def rad2deg(self): """ Converts corners from radians to degrees. - Parameters: + Parameters ----------- None. - Returns: + Returns ---------- None. @@ -316,11 +337,11 @@ def deg2rad(self): """ Converts corners from degrees to radians. - Parameters: + Parameters ----------- None. - Returns: + Returns ---------- None. @@ -343,18 +364,22 @@ def plot(self, θ=0.0, color="gray", units="rad", annotate=False): internal demonstration purposes, but not for confirming algorithms or for offical plots. - Parameters: + Parameters ----------- - θ (float): angle to rotate footprint by, radians or degrees. + θ : float, optional + Angle to rotate footprint by, radians or degrees. Default = 0.0 - color (string): line color. + color : string, optional + Line color. Default = "gray" - units (string): units θ is provided in ("deg" or "rad"). + units : string, optional + Units θ is provided in ("deg" or "rad"). Default = 'rad'. - annotate (Boolean): whether to annotate each sensor with its index in - self.detectors. + annotate : Boolean, optional + Flag whether to annotate each sensor with its index in self.detectors. + Default = False - Returns: + Returns ---------- None. @@ -377,20 +402,26 @@ def plot(self, θ=0.0, color="gray", units="rad", annotate=False): class Footprint: + + """Camera footprint class""" + def __init__(self, path=None, detectorName="detector"): """ Initiates a Footprint object. - Parameters: + Parameters ----------- - path (string): path to a .csv file containing detector corners. + path : string, optional + Path to a .csv file containing detector corners. Default = None - detectorName (string): name of column in detector file indicating to - which sensor a corner belongs. + detectorName : string, optional + Name of column in detector file indicating to which sensor a + corner belongs. Default = "detector" - Returns: + Returns ---------- - Footprint: Footprint object for the provided sensors.
+ Footprint : Footprint + Footprint object for the provided sensors. """ @@ -441,18 +472,22 @@ def plot(self, θ=0.0, color="gray", units="rad", annotate=False): is <2.1 degrees), so should be fine for internal demonstration purposes, but not for confirming algorithms or for offical plots. - Parameters: + Parameters ----------- - θ (float): angle to rotate footprint by, radians or degrees. + θ : float, optional + Angle to rotate footprint by, radians or degrees. Default = 0.0 - color (string): line color. + color : string, optional + Line color. Default = "gray" - units (string): units θ is provided in ("deg" or "rad"). + units : string, optional + Units θ is provided in ("deg" or "rad"). Default = "rad" - annotate (Boolean): whether to annotate each sensor with its index in - self.detectors. + annotate : boolean, optional + Whether to annotate each sensor with its index in + self.detectors. Default = False - Returns: + Returns ---------- None. @@ -477,23 +512,42 @@ def applyFootprint( footprint. Also returns the an ID for the sensor a detection is made on. - Parameters: + Parameters ----------- - field_df (Pandas dataframe): dataframe containing detection information with pointings. + field_df : Pandas dataframe + Dataframe containing detection information with pointings. - *_name (string): column names for object RA and Dec and field name. + ra_name : string, optional + "field_df" dataframe's column name for object's RA + for the given observation. Default = "AstRA(deg)" [units: degrees] - *_name_field (string): column names for field RA and Dec and rotation. + dec_name : string, optional + "field_df" dataframe's column name for object's declination + for the given observation. Default = "AstDec(deg)" [units: degrees] - edge_thresh (float, optional): An angular threshold in arcseconds for dropping pixels too - close to the edge.
+ ra_name_field : string, optional + "field_df" dataframe's column name for the observation field's RA + Default = "fieldRA" [units: degrees] - Returns: + dec_name_field : string, optional + "field_df" dataframe's column name for the observation field's declination + Default = "fieldDec" [Units: degrees] + + rot_name_field: string, optional + "field_df" dataframe's column name for the observation field's rotation angle + Default = "rotSkyPos" [Units: degrees] + + edge_thresh: float, optional + An angular threshold in arcseconds for dropping pixels too close to the edge. + Default = None + + Returns ---------- - detected (array): indices of rows in oifDF which fall on the sensor(s). + detected : array + Indices of rows in oifDF which fall on the sensor(s). - detectorID (array): index corresponding to a detector in - self.detectors for each entry in detected. + detectorID : array + Index corresponding to a detector in self.detectors for each entry in detected. """ @@ -543,22 +597,32 @@ def radec2focalplane(ra, dec, fieldra, fielddec, fieldID=None): the same focal plane, but does not account for field rotation. Maintains alignment with the meridian passing through the field center. - Parameters: + Parameters ----------- - ra (float/array of floats): observation Right Ascension, radians. + ra : float or array of floats + Observation Right Ascension, radians. - dec (float/array of floats): observation Declination, radians. + dec : float/array of floats + Observation Declination, radians. - fieldra (float/array of floats): field pointing Right Ascension, radians. + fieldra : float or array of floats + Field pointing Right Ascension, radians. - fielddec (float/array of floats): field pointing Declination, radians. + fielddec : float/array of floats + Field pointing Declination, radians. - fieldID (float/array of floats): Field ID, optional. + fieldID : float/array of floats + Field ID, optional.
Default = None - Returns: + Returns ---------- - x, y (float/array of floats): Coordinates on the focal plane, radians projected - to the plane tangent to the unit sphere. + x : float or array of floats + x coordinates on the focal plane, radians projected to the plane tangent to the + unit sphere. + + y : float/array of floats + y coordinates on the focal plane, radians projected to the plane tangent to the + unit sphere. """ diff --git a/src/sorcha/modules/PPGetLogger.py b/src/sorcha/modules/PPGetLogger.py index 9f0dc813..a63143c3 100644 --- a/src/sorcha/modules/PPGetLogger.py +++ b/src/sorcha/modules/PPGetLogger.py @@ -13,21 +13,28 @@ def PPGetLogger( """ Initialises log and error files. - Parameters: + Parameters ----------- - log_location (string): filepath to directory in which to save logs. + log_location : string + Filepath to directory in which to save logs. - log_format (string): format for log filename. + log_format : string, optional + Format for log filename. + Default = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s " - log_name (string): name of log. + log_name : string, optional + Name of log. Default = "" - log_file_info (string): name with which to save info log. + log_file_info : string, optional + Name with which to save info log. Default = "sorcha.log" - log_file_error (string): name with which to save error log. + log_file_error : string, optional + Name with which to save error log. Default = "sorcha.err" - Returns: + Returns ---------- - log (logging object): log object. + log : logging object + Log object. """ diff --git a/src/sorcha/modules/PPGetMainFilterAndColourOffsets.py b/src/sorcha/modules/PPGetMainFilterAndColourOffsets.py index 174e8d47..426660a1 100644 --- a/src/sorcha/modules/PPGetMainFilterAndColourOffsets.py +++ b/src/sorcha/modules/PPGetMainFilterAndColourOffsets.py @@ -9,25 +9,31 @@ def PPGetMainFilterAndColourOffsets(filename, observing_filters, filesep): the expected colour offsets.
Also makes sure that columns exist for all the expected colour offsets in the physical parameters file. - The main filter should be found as a column heading of H_[mainfilter]. If - this format isn't followed, this function will error out. - - Parameters: + Parameters ----------- - filename (string): the filename of the physical parameters file. + filename : string + The filename of the physical parameters file. - observing_filters (list of strings): the observation filters requested - in the config file. + observing_filters : list of strings + The observation filters requested in the configuration file. - filesep (string): the format of the physical parameters file. Should be "csv"/"comma" - or "whitespace". + filesep : string + The format of the physical parameters file. Should be "csv"/"comma" + or "whitespace". - Returns: + Returns ---------- - mainfilter (string): the main filter in which H is defined. + mainfilter : string + The main filter in which H is defined. + + colour_offsets : list of strings + A list of the colour offsets present in the physical parameters file. - colour_offsets (list of strings): a list of the colour offsets present in the - physical parameters file. + Notes + ------ + + The main filter should be found as a column heading of H_[mainfilter]. If + this format is NOT followed, this function will error out. """ diff --git a/src/sorcha/modules/PPJoinEphemeridesAndOrbits.py b/src/sorcha/modules/PPJoinEphemeridesAndOrbits.py index 7ebf5744..ab9cfc12 100755 --- a/src/sorcha/modules/PPJoinEphemeridesAndOrbits.py +++ b/src/sorcha/modules/PPJoinEphemeridesAndOrbits.py @@ -8,15 +8,18 @@ def PPJoinEphemeridesAndOrbits(padafr, padaor): dataframe has to have same ObjIDs: NaNs will populate the fields for the missing objects. - Parameters: + Parameters ----------- - padafr (Pandas dataframe): dataframe of ephemerides/OIF output. + padafr : Pandas dataframe + Dataframe of ephemerides output.
- padaor (Pandas dataframe): dataframe of orbital information. + padaor : Pandas dataframe + Dataframe of orbital information. - Returns: + Returns ---------- - resdf (Pandas dataframe): joined dataframe. + resdf : Pandas dataframe + Joined dataframe of "padafr" and "padaor" """ diff --git a/src/sorcha/modules/PPJoinEphemeridesAndParameters.py b/src/sorcha/modules/PPJoinEphemeridesAndParameters.py index 69c6cb49..d6c76b5a 100755 --- a/src/sorcha/modules/PPJoinEphemeridesAndParameters.py +++ b/src/sorcha/modules/PPJoinEphemeridesAndParameters.py @@ -4,15 +4,18 @@ def PPJoinEphemeridesAndParameters(padafr, padacl): dataframe. Each database has to have same ObjIDs: NaNs will be populate the fields for the missing objects. - Parameters: + Parameters ----------- - padafr (Pandas dataframe): dataframe of ephemerides/OIF output. + padafr : Pandas dataframe: + Dataframe of ephemerides output. - padacl (Pandas dataframe): dataframe of physical parameters information. + padacl : Pandas dataframe + Dataframe of physical parameters information. - Returns: + Returns ---------- - resdf (Pandas dataframe): joined dataframe. + resdf : Pandas dataframe + Joined dataframe of "padafr" and "padacl" """ diff --git a/src/sorcha/modules/PPMagnitudeLimit.py b/src/sorcha/modules/PPMagnitudeLimit.py index d9745219..54e39543 100644 --- a/src/sorcha/modules/PPMagnitudeLimit.py +++ b/src/sorcha/modules/PPMagnitudeLimit.py @@ -1,17 +1,21 @@ def PPMagnitudeLimit(observations, mag_limit): """ - Filter that performs a straight magnitude cut based on a defined limit. + Filter that performs a straight cut on apparent PSF magnitude + based on a defined threshold. - Parameters: + Parameters ----------- - observations (Pandas dataframe) dataframe of observations. Must have - "observedTrailedSourceMag" column. + observations : pandas dataframe + Dataframe of observations. Must have "observedPSFMag" column. - mag_limit (float): limit for magnitude cut. 
+ mag_limit : float + Limit for apparent magnitude cut. - Returns: + Returns ----------- - observations (Pandas dataframe): same as input, but with entries fainter than the limit removed. + observations : pandas dataframe + "observations" dataframe modified with apparent PSF mag greater than + or equal to the limit removed. """ diff --git a/src/sorcha/modules/PPMatchPointingToObservations.py b/src/sorcha/modules/PPMatchPointingToObservations.py index 192cf921..956ac7cb 100644 --- a/src/sorcha/modules/PPMatchPointingToObservations.py +++ b/src/sorcha/modules/PPMatchPointingToObservations.py @@ -10,16 +10,19 @@ def PPMatchPointingToObservations(padain, pointfildb): database onto the observations dataframe, then drops all observations which are not in one of the requested filters and any duplicate columns. - Parameters: + Parameters ----------- - padain (Pandas dataframe): dataframe of observations. + padain : pandas dataframe + Dataframe of observations. - pointfildb (Pandas dataframe): dataframe of the pointing database. + pointfildb : pandas dataframe + Dataframe of the pointing database. - Returns: + Returns ----------- - res_df (Pandas dataframe): Merged dataframe of observations with pointing - database, with all superfluous observations dropped. + res_df : Pandas dataframe + Merged dataframe of observations ("padain") with pointing + database ("pointfildb"), with all superfluous observations dropped. 
""" diff --git a/src/sorcha/modules/PPModuleRNG.py b/src/sorcha/modules/PPModuleRNG.py index 1c4855ff..dd3b6164 100755 --- a/src/sorcha/modules/PPModuleRNG.py +++ b/src/sorcha/modules/PPModuleRNG.py @@ -6,10 +6,11 @@ class PerModuleRNG: """A collection of per-module random number generators.""" def __init__(self, base_seed, pplogger=None): - """Parameters: + """Parameters -------------- - base_seed (int): The base seed for a random number generator + base_seed : int + The base seed for a random number generator """ self._base_seed = base_seed self._rngs = {} @@ -25,13 +26,15 @@ def getModuleRNG(self, module_name): Return a random number generator that is based on a base seed and the current module name. - Parameters: + Parameters ----------- - module_name (str): The name of the module + module_name : string + The name of the module - Returns: + Returns ---------- - rng (numpy Generator): The random number generator. + rng : numpy Generator + The random number generator. """ if module_name in self._rngs: return self._rngs[module_name] diff --git a/src/sorcha/modules/PPOutput.py b/src/sorcha/modules/PPOutput.py index 64ce75ca..e430cc92 100755 --- a/src/sorcha/modules/PPOutput.py +++ b/src/sorcha/modules/PPOutput.py @@ -12,13 +12,15 @@ def PPOutWriteCSV(padain, outf): """ Writes a pandas dataframe out to a CSV file at a location given by the user. - Parameters: + Parameters ----------- - padain (Pandas dataframe): dataframe of output. + padain : pandas dataframe + Dataframe of output. - outf (string): location to which file should be written. + outf : string + Location to which file should be written. - Returns: + Returns ----------- None. @@ -33,15 +35,18 @@ def PPOutWriteHDF5(pp_results, outf, keyin): """ Writes a pandas dataframe out to a HDF5 file at a location given by the user. - Parameters: + Parameters ----------- - padain (Pandas dataframe): dataframe of output. + padain : pandas dataframe + Dataframe of output. 
- outf (string): location to which file should be written. + outf : string + Location to which file should be written. - keyin (string): key at which data will be located. + keyin : string + Key at which data will be located. - Returns: + Returns ----------- None. @@ -64,13 +69,15 @@ def PPOutWriteSqlite3(pp_results, outf): """ Writes a pandas dataframe out to a CSV file at a location given by the user. - Parameters: + Parameters ----------- - pp_results (Pandas dataframe): dataframe of output. + pp_results : pandas dataframe + Dataframe of output. - outf (string): location to which file should be written. + outf : string + Location to which file should be written. - Returns: + Returns ----------- None. @@ -88,19 +95,25 @@ def PPWriteOutput(cmd_args, configs, observations_in, endChunk=0, verbose=False) Writes the output in the format specified in the config file to a location specified by the user. - Parameters: + Parameters ----------- - cmd_args (dictionary): dictonary of command line arguments. + cmd_args : dictionary + Dictonary of command line arguments. - configs (dictionary): dictionary of config file arguments. + configs : Dictionary + Dictionary of config file arguments. - observations_in (Pandas dataframe): dataframe of output. + observations_in : Pandas dataframe + Dataframe of output. - endChunk (int): integer of last object in chunk. Used only for HDF5 output key. + endChunk : integer, optional + Integer of last object in chunk. Used only for HDF5 output key. + Default = 0 - verbose (Boolean): verbose mode on or off. + verbose : boolean, optional + Verbose logging mode on or off. Default = False - Returns: + Returns ----------- None. 
diff --git a/src/sorcha/modules/PPRandomizeMeasurements.py b/src/sorcha/modules/PPRandomizeMeasurements.py index 79de11c7..95176466 100644 --- a/src/sorcha/modules/PPRandomizeMeasurements.py +++ b/src/sorcha/modules/PPRandomizeMeasurements.py @@ -37,22 +37,24 @@ def randomizeAstrometry( Randomize astrometry with a normal distribution around the actual RADEC pointing. The randomized values are added to the input pandas data frame. - Parameters: + Parameters ----------- - df (Pandas dataframe): dataframe containing astrometry and sigma. + df : pandas dataframe + Dataframe containing astrometry and sigma. - module_rngs (PerModuleRNG): A collection of random number generators (per module). + module_rngs : PerModuleRNG + A collection of random number generators (per module). *Name (string): column names for right ascension, declination, randomized right ascension, randomized declination, and standard deviation. *Units (string): units for RA and Dec and sigma ('deg'/'rad'/'mas'). - Returns: + Returns ----------- df (Pandas dataframe): as input, with randomized RADEC columns added - Comments: + Notes ----------- Covariances in RADEC are currently not supported. The routine calculates a normal distribution on the unit sphere, so as to allow for a correct modeling of diff --git a/src/sorcha/readers/CSVReader.py b/src/sorcha/readers/CSVReader.py index 83818d9e..2f9de978 100755 --- a/src/sorcha/readers/CSVReader.py +++ b/src/sorcha/readers/CSVReader.py @@ -92,7 +92,7 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): block_start=2 would skip the first two lines after the header and return data starting on row=2. [Default=0] - block_size int, optional, default=None + block_size: int, optional, default=None The number of rows to read in. Use block_size=None to read in all available data. 
diff --git a/src/sorcha/readers/CombinedDataReader.py b/src/sorcha/readers/CombinedDataReader.py index a9da3c28..f3796f26 100755 --- a/src/sorcha/readers/CombinedDataReader.py +++ b/src/sorcha/readers/CombinedDataReader.py @@ -25,8 +25,14 @@ def __init__(self, ephem_primary=False, **kwargs): """ Parameters ---------- - ephem_primary (bool, optional): Use the ephemeris reader as the primary + ephem_primary: bool, optional + Use the ephemeris reader as the primary reader. Otherwise uses the first auxiliary data reader. + Default = False + + **kwargs : dict, optional + Extra arguments + """ self.ephem_reader = None self.aux_data_readers = [] @@ -38,7 +44,8 @@ def add_ephem_reader(self, new_reader): """ Parameters ---------- - new_reader (ObjectDataReader): The reader for a specific input file. + new_reader : ObjectDataReader + The reader for a specific input file. """ pplogger = logging.getLogger(__name__) if self.ephem_reader is not None: @@ -51,7 +58,8 @@ def add_aux_data_reader(self, new_reader): """ Parameters ---------- - new_reader (ObjectDataReader): The reader for a specific input file. + new_reader : ObjectDataReader + The reader for a specific input file. """ self.aux_data_readers.append(new_reader) @@ -59,17 +67,24 @@ def read_block(self, block_size=None, verbose=False, **kwargs): """Reads in a set number of rows from the input, performs post-processing and validation, and returns a data frame. - Parameters: + Parameters ----------- - block_size (int, optional): the number of rows to read in. + block_size: int, optional + the number of rows to read in. Use block_size=None to read in all available data. - [Default = None] + Default = None - verbose (bool, optional): use verbose logging. + verbose : bool, optional + Use verbose logging. + Default = False - Returns: + **kwargs : dict, optional + Extra arguments + + Returns ----------- - res_df (Pandas dataframe): dataframe of the combined object data. + res_df : Pandas dataframe + dataframe of the combined object data. 
""" pplogger = logging.getLogger(__name__) @@ -143,19 +158,24 @@ def read_aux_block(self, block_size=None, verbose=False, **kwargs): This function DOES NOT include the ephemeris data in the returned data frame. It is to be used when generating the ephemeris during the execution of Sorcha. - Parameters: + Parameters ----------- - block_size (int, optional): the number of rows to read in. + block_size : int, optional + the number of rows to read in. Use block_size=None to read in all available data. [Default = None] - verbose (bool, optional): use verbose logging. + verbose : bool, optional + use verbose logging. + Default = False - Returns: - ----------- - res_df (Pandas dataframe): dataframe of the combined object data, excluding - any ephemeris data. + **kwargs : dict, optional + Extra arguments + Returns + ----------- + res_df : Pandas dataframe + dataframe of the combined object data, excluding any ephemeris data. """ pplogger = logging.getLogger(__name__) verboselog = pplogger.info if verbose else lambda *a, **k: None diff --git a/src/sorcha/readers/DatabaseReader.py b/src/sorcha/readers/DatabaseReader.py index 47005db1..a8eb9b7d 100755 --- a/src/sorcha/readers/DatabaseReader.py +++ b/src/sorcha/readers/DatabaseReader.py @@ -8,6 +8,13 @@ # NOTE: this was written for a now-defunct functionality, but has been left # in the code as a database reader class may be useful later. +""" +!!!!!!!!!!!!!!!! +This class is Not currently used in Sorcha. This was written for a now-defunct functionality, +but have kept this class in case it may be useful in future iterations of the codebase. +!!!!!!!!!!!!!!!! +""" + class DatabaseReader(ObjectDataReader): """A class to read in object data stored in a sqlite database.""" @@ -15,9 +22,10 @@ class DatabaseReader(ObjectDataReader): def __init__(self, intermdb, **kwargs): """A class for reading the object data from a sqlite database. - Parameters: + Parameters ----------- - intermdb (string): filepath/name of temporary database. 
+ intermdb : string + filepath/name of temporary database. """ super().__init__(**kwargs) self.intermdb = intermdb @@ -26,35 +34,38 @@ def get_reader_info(self): """Return a string identifying the current reader name and input information (for logging and output). - Returns: + Returns -------- - name (str): The reader information. + name : string + The reader information. """ return f"DatabaseReader:{self.intermdb}" def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): """Reads in a set number of rows from the input. - Parameters: + Parameters ----------- - block_start (int, optional): The 0-indexed row number from which + block_start : int, optional + The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header and return data starting on row=2. [Default=0] - block_size (int, optional): the number of rows to read in. + block_size : int, optional + the number of rows to read in. Use block_size=None to read in all available data. A non-None block size must be provided if block_start > 0. - validate_data (bool, optional): if True then checks the data for - NaNs or nulls. + validate_data : bool, optional + if True then checks the data for NaNs or nulls. - Returns: + Returns ---------- - res_df (Pandas dataframe): dataframe of the object data. - + res_df : Pandas dataframe + dataframe of the object data. - Notes: + Notes ------ A non-None block size must be provided if block_start > 0. """ @@ -74,13 +85,15 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): def _read_objects_internal(self, obj_ids, **kwargs): """Read in a chunk of data for given object IDs. - Parameters: + Parameters ----------- - obj_ids (list): A list of object IDs to use. + obj_ids : list + A list of object IDs to use. - Returns: + Returns ----------- - res_df (Pandas dataframe): The dataframe for the object data. 
+ res_df : Pandas dataframe + The dataframe for the object data. """ con = sqlite3.connect(self.intermdb) prm_list = ", ".join("?" for _ in obj_ids) @@ -93,19 +106,21 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Note - ---- + Notes + ------ The base implementation includes filtering that is common to most input types. Subclasses should call super.process_and_validate() to ensure that the ancestor’s validation is also applied. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. + input_table : Pandas dataframe + A loaded table. - Returns: + Returns ----------- - input_table (Pandas dataframe): Returns the input dataframe modified in-place. + input_table : Pandas dataframe + Returns the input dataframe modified in-place. """ # Perform the parent class's validation (checking object ID column). input_table = super()._process_and_validate_input_table(input_table, **kwargs) diff --git a/src/sorcha/readers/HDF5Reader.py b/src/sorcha/readers/HDF5Reader.py index 2b56e387..a559eb17 100755 --- a/src/sorcha/readers/HDF5Reader.py +++ b/src/sorcha/readers/HDF5Reader.py @@ -9,9 +9,10 @@ class HDF5DataReader(ObjectDataReader): def __init__(self, filename, **kwargs): """A class for reading the object data from an HDF5 file. - Parameters: + Parameters ----------- - filename (string): location/name of the data file. + filename : string + location/name of the data file. """ super().__init__(**kwargs) self.filename = filename @@ -24,30 +25,33 @@ def get_reader_info(self): """Return a string identifying the current reader name and input information (for logging and output). - Returns: + Returns -------- - name (str): The reader information. + name : string + The reader information. 
""" return f"HDF5DataReader:{self.filename}" def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): """Reads in a set number of rows from the input. - Parameters: + Parameters ----------- - block_start (int, optional): The 0-indexed row number from which + block_start : int, optional + The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header and return data starting on row=2. [Default=0] - block_size (int, optional): the number of rows to read in. + block_size : int, optional + the number of rows to read in. Use block_size=None to read in all available data. [Default = None] - validate_data (bool, optional): if True then checks the data for - NaNs or nulls. + validate_data : bool, optional + if True then checks the data for NaNs or nulls. - Returns: + Returns ----------- res_df (Pandas dataframe): dataframe of the object data. """ @@ -74,13 +78,15 @@ def _build_id_map(self): def _read_objects_internal(self, obj_ids, **kwargs): """Read in a chunk of data for given object IDs. - Parameters: + Parameters ----------- - obj_ids (list): A list of object IDs to use. + obj_ids : list + A list of object IDs to use. - Returns: + Returns ----------- - res_df (Pandas dataframe): The dataframe for the object data. + res_df : Pandas dataframe + The dataframe for the object data. """ self._build_id_map() row_match = self.obj_id_table["ObjID"].isin(obj_ids) @@ -92,19 +98,21 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Note - ---- + Notes + ------ The base implementation includes filtering that is common to most input types. Subclasses should call super.process_and_validate() to ensure that the ancestor’s validation is also applied. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. 
+ input_table : Pandas dataframe + A loaded table. - Returns: + Returns ----------- - input_table (Pandas dataframe): Returns the input dataframe modified in-place. + input_table : Pandas dataframe + Returns the input dataframe modified in-place. """ # Perform the parent class's validation (checking object ID column). input_table = super()._process_and_validate_input_table(input_table, **kwargs) diff --git a/src/sorcha/readers/OIFReader.py b/src/sorcha/readers/OIFReader.py index 39de6dff..e85b2a0e 100755 --- a/src/sorcha/readers/OIFReader.py +++ b/src/sorcha/readers/OIFReader.py @@ -20,11 +20,13 @@ class OIFDataReader(ObjectDataReader): def __init__(self, filename, inputformat, **kwargs): """A class for reading the object data from a CSV file. - Parameters: + Parameters ----------- - filename (string): location/name of the data file. + filename : string + location/name of the data file. - inputformat (string): format of input file ("whitespace"/"comma"/"csv"/"h5"/"hdf5"). + inputformat : string + format of input file ("whitespace"/"comma"/"csv"/"h5"/"hdf5"). """ super().__init__(**kwargs) @@ -46,29 +48,33 @@ def get_reader_info(self): """Return a string identifying the current reader name and input information (for logging and output). - Returns: + Returns -------- - name (str): The reader information. + name : string + The reader information. """ return f"OIFDataReader|{self.reader.get_reader_info()}" def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): """Reads in a set number of rows from the input. - Parameters: + Parameters ----------- - block_start (int, optional): The 0-indexed row number from which + block_start : int, optional + The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header and return data starting on row=2. [Default=0] - block_size (int, optional): the number of rows to read in. 
+ block_size : int, optional + the number of rows to read in. Use block_size=None to read in all available data. [Default = None] - Returns: + Returns ----------- - res_df (Pandas dataframe): dataframe of the object data. + res_df : Pandas dataframe + dataframe of the object data. """ res_df = self.reader.read_rows(block_start, block_size, **kwargs) @@ -78,13 +84,15 @@ def _read_objects_internal(self, obj_ids, **kwargs): """Read in a chunk of data corresponding to all rows for a given set of object IDs. - Parameters: + Parameters ----------- - obj_ids (list): A list of object IDs to use. + obj_ids : list + A list of object IDs to use. - Returns: + Returns ----------- - res_df (Pandas dataframe): The dataframe for the object data. + res_df : Pandas dataframe + The dataframe for the object data. """ res_df = self.reader.read_objects(obj_ids, **kwargs) return res_df @@ -93,19 +101,21 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Note - ---- + Notes + ----- The base implementation includes filtering that is common to most input types. Subclasses should call super.process_and_validate() to ensure that the ancestor’s validation is also applied. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. + input_table : Pandas dataframe + A loaded table. - Returns: + Returns ----------- - input_table (Pandas dataframe): Returns the input dataframe modified in-place. + input_table : Pandas dataframe + Returns the input dataframe modified in-place. """ # We do not call reader.process_and_validate_input_table() or # super().process_and_validate_input_table() because reader's read functions have @@ -159,15 +169,18 @@ def _process_and_validate_input_table(self, input_table, **kwargs): def read_full_oif_table(filename, inputformat): """A helper function for testing that reads and returns an entire OIF table. 
- Parameters: + Parameters ----------- - filename (string): location/name of the data file. + filename : string + location/name of the data file. - inputformat (string): format of input file ("whitespace"/"comma"/"csv"/"h5"/"hdf5"). + inputformat : string + format of input file ("whitespace"/"comma"/"csv"/"h5"/"hdf5"). - Returns: + Returns ----------- - res_df (Pandas dataframe): dataframe of the object data. + res_df : Pandas dataframe + dataframe of the object data. """ reader = OIFDataReader(filename, inputformat) diff --git a/src/sorcha/readers/ObjectDataReader.py b/src/sorcha/readers/ObjectDataReader.py index 323cce9f..c2c593ac 100755 --- a/src/sorcha/readers/ObjectDataReader.py +++ b/src/sorcha/readers/ObjectDataReader.py @@ -29,7 +29,8 @@ def __init__(self, cache_table=False, **kwargs): Parameters ---------- - cache_table (bool, optional): Indicates whether to keep the entire table in memory. + cache_table : bool, optional + Indicates whether to keep the entire table in memory. """ self._cache_table = cache_table self._table = None @@ -39,9 +40,10 @@ def get_reader_info(self): """Return a string identifying the current reader name and input information (for logging and output). - Returns: + Returns -------- - name (str): The reader information. + name : str + The reader information. """ pass # pragma: no cover @@ -49,20 +51,23 @@ def read_rows(self, block_start=0, block_size=None, **kwargs): """Reads in a set number of rows from the input, performs post-processing and validation, and returns a data frame. - Parameters: + Parameters ----------- - block_start (int, optional): The 0-indexed row number from which + block_start : int (optional) + The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header and return data starting on row=2. [Default=0] - block_size (int, optional): the number of rows to read in. + block_size : int (optional) + the number of rows to read in. 
Use block_size=None to read in all available data. [Default = None] - Returns: + Returns ----------- - res_df (Pandas dataframe): dataframe of the object data. + res_df : Pandas dataframe + dataframe of the object data. """ if self._cache_table: @@ -92,13 +97,15 @@ def read_objects(self, obj_ids, **kwargs): """Read in a chunk of data corresponding to all rows for a given set of object IDs. - Parameters: + Parameters ----------- - obj_ids (list): A list of object IDs to use. + obj_ids : list + A list of object IDs to use. - Returns: + Returns ----------- - res_df (Pandas dataframe): The dataframe for the object data. + res_df : Pandas dataframe + The dataframe for the object data. """ if self._cache_table: # Load the entire table the first time. @@ -121,13 +128,15 @@ def _validate_object_id_column(self, input_table): """Checks that the object ID column exists and converts it to a string. This is the common validity check for all object data tables. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. + input_table : Pandas dataframe + A loaded table. - Returns: + Returns ----------- - input_table (Pandas dataframe): Returns the input dataframe modified in-place. + input_table : Pandas dataframe + Returns the input dataframe modified in-place. """ # Check that the ObjID column exists and convert it to a string. try: @@ -150,16 +159,18 @@ def _process_and_validate_input_table(self, input_table, **kwargs): input types. Subclasses should call super.process_and_validate() to ensure that the ancestor’s validation is also applied. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. + input_table : Pandas dataframe + A loaded table. - disallow_nan (bool, optional): if True then checks the data for - NaNs or nulls. + disallow_nan : bool (optional) + if True then checks the data for NaNs or nulls. 
- Returns: + Returns ----------- - input_table (Pandas dataframe): Returns the input dataframe modified in-place. + input_table : Pandas dataframe + Returns the input dataframe modified in-place. """ input_table = self._validate_object_id_column(input_table) diff --git a/src/sorcha/readers/OrbitAuxReader.py b/src/sorcha/readers/OrbitAuxReader.py index 689c7f6c..17b17f46 100755 --- a/src/sorcha/readers/OrbitAuxReader.py +++ b/src/sorcha/readers/OrbitAuxReader.py @@ -11,13 +11,16 @@ class OrbitAuxReader(CSVDataReader): def __init__(self, filename, sep="csv", header=-1, **kwargs): """A class for reading the object data from a CSV file. - Parameters: + Parameters ----------- - filename (string): location/name of the data file. + filename : string + location/name of the data file. - sep (string, optional): format of input file ("whitespace"/"csv"). + sep : string, optional + format of input file ("whitespace"/"csv"). - header (int): The row number of the header. If not provided, does an automatic search. + header : int + The row number of the header. If not provided, does an automatic search. """ super().__init__(filename, sep, header, **kwargs) @@ -25,9 +28,10 @@ def get_reader_info(self): """Return a string identifying the current reader name and input information (for logging and output). - Returns: + Returns -------- - name (str): The reader information. + name : string + The reader information. """ return f"OrbitAuxReader:{self.filename}" @@ -35,19 +39,21 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Note - ---- + Notes + ------ The base implementation includes filtering that is common to most input types. Subclasses should call super.process_and_validate() to ensure that the ancestor’s validation is also applied. - Parameters: + Parameters ----------- - input_table (Pandas dataframe): A loaded table. 
+ input_table : Pandas dataframe + A loaded table. - Returns: + Returns ----------- - res_df (Pandas dataframe): Returns the input dataframe modified in-place. + res_df : Pandas dataframe + Returns the input dataframe modified in-place. """ # Do standard CSV file processing super()._process_and_validate_input_table(input_table, **kwargs)